/* SPDX-License-Identifier: ISC */

#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
#include "mac.h"
#include "eeprom.h"

static int
mt7603_start(struct ieee80211_hw *hw)
{
	struct mt7603_dev *dev = hw->priv;

	mt7603_mac_start(dev);
	dev->survey_time = ktime_get_boottime();
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt7603_mac_work(&dev->mac_work.work);

	return 0;
}

static void
mt7603_stop(struct ieee80211_hw *hw)
{
	struct mt7603_dev *dev = hw->priv;

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	cancel_delayed_work_sync(&dev->mac_work);
	mt7603_mac_stop(dev);
}

static int
mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	struct mt7603_dev *dev = hw->priv;
	struct mt76_txq *mtxq;
	u8 bc_addr[ETH_ALEN];
	int idx;
	int ret = 0;

	mutex_lock(&dev->mt76.mutex);

	mvif->idx = ffs(~dev->vif_mask) - 1;
	if (mvif->idx >= MT7603_MAX_INTERFACES) {
		ret = -ENOSPC;
		goto out;
	}

	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
		get_unaligned_le32(vif->addr));
	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
		(get_unaligned_le16(vif->addr + 4) |
		 MT_MAC_ADDR1_VALID));

	if (vif->type == NL80211_IFTYPE_AP) {
		mt76_wr(dev, MT_BSSID0(mvif->idx),
			get_unaligned_le32(vif->addr));
		mt76_wr(dev, MT_BSSID1(mvif->idx),
			(get_unaligned_le16(vif->addr + 4) |
			 MT_BSSID1_VALID));
	}

	idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
	dev->vif_mask |= BIT(mvif->idx);
	mvif->sta.wcid.idx = idx;
	mvif->sta.wcid.hw_key_idx = -1;

	eth_broadcast_addr(bc_addr);
	mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);

	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
	mtxq->wcid = &mvif->sta.wcid;
	mt76_txq_init(&dev->mt76, vif->txq);
	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);

out:
	mutex_unlock(&dev->mt76.mutex);

	return ret;
}

static void
mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	struct mt7603_dev *dev = hw->priv;
	int idx = mvif->sta.wcid.idx;

	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
	mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
	mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
	mt7603_beacon_set_timer(dev, mvif->idx, 0);

	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
	mt76_txq_remove(&dev->mt76, vif->txq);

	mutex_lock(&dev->mt76.mutex);
	dev->vif_mask &= ~BIT(mvif->idx);
	mutex_unlock(&dev->mt76.mutex);
}

static void
mt7603_init_edcca(struct mt7603_dev *dev)
{
	/* Set lower signal level to -65dBm */
	mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);

	/* clear previous energy detect monitor results */
	mt76_rr(dev, MT_MIB_STAT_ED);

	if (dev->ed_monitor)
		mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
	else
		mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);

	dev->ed_strict_mode = 0xff;
	dev->ed_strong_signal = 0;
	dev->ed_time = ktime_get_boottime();

	mt7603_edcca_set_strict(dev, false);
}

static int
mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
{
	u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
	int idx, ret;
	u8 bw = MT_BW_20;
	bool failed = false;

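	/* stop the periodic MAC work before reconfiguring the channel */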
	cancel_delayed_work_sync(&dev->mac_work);

	mutex_lock(&dev->mt76.mutex);
	set_bit(MT76_RESET, &dev->mt76.state);

	mt76_set_channel(&dev->mt76);
	mt7603_mac_stop(dev);

	if (def->width == NL80211_CHAN_WIDTH_40)
		bw = MT_BW_40;

	dev->mt76.chandef = *def;
	mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
	ret = mt7603_mcu_set_channel(dev);
	if (ret) {
		failed = true;
		goto out;
	}

	if (def->chan->band == NL80211_BAND_5GHZ) {
		idx = 1;
		rssi_data += MT_EE_RSSI_OFFSET_5G;
	} else {
		idx = 0;
		rssi_data += MT_EE_RSSI_OFFSET_2G;
	}

	memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset));

	idx |= (def->chan -
		mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1;
	mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx);
	mt7603_mac_set_timing(dev);
	mt7603_mac_start(dev);

	clear_bit(MT76_RESET, &dev->mt76.state);

	mt76_txq_schedule_all(&dev->mt76);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     MT7603_WATCHDOG_TIME);

	/* reset channel stats */
	mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
	mt76_set(dev, MT_MIB_CTL,
		 MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
	mt76_rr(dev, MT_MIB_STAT_PSCCA);
	mt7603_cca_stats_reset(dev);

	dev->survey_time = ktime_get_boottime();

	mt7603_init_edcca(dev);

out:
	mutex_unlock(&dev->mt76.mutex);

	if (failed)
		mt7603_mac_work(&dev->mac_work.work);

	return ret;
}

static int
mt7603_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt7603_dev *dev = hw->priv;
	int ret = 0;

	if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
		       IEEE80211_CONF_CHANGE_POWER))
		ret = mt7603_set_channel(dev, &hw->conf.chandef);

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		mutex_lock(&dev->mt76.mutex);

		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
			dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
		else
			dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;

		mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);

		mutex_unlock(&dev->mt76.mutex);
	}

	return ret;
}

static void
mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct mt7603_dev *dev = hw->priv;
	u32 flags = 0;

#define MT76_FILTER(_flag, _hw) do { \
		flags |= *total_flags & FIF_##_flag; \
		dev->rxfilter &= ~(_hw); \
		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
	} while (0)

	dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
			   MT_WF_RFCR_DROP_OTHER_BEACON |
			   MT_WF_RFCR_DROP_FRAME_REPORT |
			   MT_WF_RFCR_DROP_PROBEREQ |
			   MT_WF_RFCR_DROP_MCAST_FILTERED |
			   MT_WF_RFCR_DROP_MCAST |
			   MT_WF_RFCR_DROP_BCAST |
			   MT_WF_RFCR_DROP_DUPLICATE |
			   MT_WF_RFCR_DROP_A2_BSSID |
			   MT_WF_RFCR_DROP_UNWANTED_CTL |
			   MT_WF_RFCR_DROP_STBC_MULTI);

	MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
			       MT_WF_RFCR_DROP_A3_MAC |
			       MT_WF_RFCR_DROP_A3_BSSID);

	MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);

	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
			     MT_WF_RFCR_DROP_RTS |
			     MT_WF_RFCR_DROP_CTL_RSV |
			     MT_WF_RFCR_DROP_NDPA);

	*total_flags = flags;
	mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
}

static void
mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *info, u32 changed)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;

	mutex_lock(&dev->mt76.mutex);

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
		if (info->assoc || info->ibss_joined) {
			mt76_wr(dev, MT_BSSID0(mvif->idx),
				get_unaligned_le32(info->bssid));
			mt76_wr(dev, MT_BSSID1(mvif->idx),
				(get_unaligned_le16(info->bssid + 4) |
				 MT_BSSID1_VALID));
		} else {
			mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
			mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
		}
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		int slottime = info->use_short_slot ? 9 : 20;

		if (slottime != dev->slottime) {
			dev->slottime = slottime;
			mt7603_mac_set_timing(dev);
		}
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
		int beacon_int = !!info->enable_beacon * info->beacon_int;

		tasklet_disable(&dev->pre_tbtt_tasklet);
		mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
		tasklet_enable(&dev->pre_tbtt_tasklet);
	}

	mutex_unlock(&dev->mt76.mutex);
}

int
mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
	       struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	int idx;
	int ret = 0;

	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1);
	if (idx < 0)
		return -ENOSPC;

	__skb_queue_head_init(&msta->psq);
	msta->ps = ~0;
	msta->smps = ~0;
	msta->wcid.sta = 1;
	msta->wcid.idx = idx;
	mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr);
	mt7603_wtbl_set_ps(dev, msta, false);

	if (vif->type == NL80211_IFTYPE_AP)
		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);

	return ret;
}

void
mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);

	mt7603_wtbl_update_cap(dev, sta);
}

void
mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	spin_lock_bh(&dev->ps_lock);
	__skb_queue_purge(&msta->psq);
	mt7603_filter_tx(dev, wcid->idx, true);
	spin_unlock_bh(&dev->ps_lock);

	mt7603_wtbl_clear(dev, wcid->idx);
}

static void
mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
				      skb, 0);
}

void
mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct sk_buff_head list;

	mt76_stop_tx_queues(&dev->mt76, sta, true);
	mt7603_wtbl_set_ps(dev, msta, ps);
	if (ps)
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&dev->ps_lock);
	skb_queue_splice_tail_init(&msta->psq, &list);
	spin_unlock_bh(&dev->ps_lock);

	mt7603_ps_tx_list(dev, &list);
}

static void
mt7603_ps_set_more_data(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}

static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
			       struct ieee80211_sta *sta,
			       u16 tids, int nframes,
			       enum ieee80211_frame_release_type reason,
			       bool more_data)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct sk_buff_head list;
	struct sk_buff *skb, *tmp;

	__skb_queue_head_init(&list);

	mt7603_wtbl_set_ps(dev, msta, false);

	spin_lock_bh(&dev->ps_lock);
	skb_queue_walk_safe(&msta->psq, skb, tmp) {
		if (!nframes)
			break;

		if (!(tids & BIT(skb->priority)))
			continue;

		skb_set_queue_mapping(skb, MT_TXQ_PSD);
		__skb_unlink(skb, &msta->psq);
		mt7603_ps_set_more_data(skb);
		__skb_queue_tail(&list, skb);
		nframes--;
	}
	spin_unlock_bh(&dev->ps_lock);

	if (!skb_queue_empty(&list))
		ieee80211_sta_eosp(sta);

	mt7603_ps_tx_list(dev, &list);

	if (nframes)
		mt76_release_buffered_frames(hw, sta, tids, nframes, reason,
					     more_data);
}

static int
mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
	       struct ieee80211_key_conf *key)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv :
				  &mvif->sta;
	struct mt76_wcid *wcid = &msta->wcid;
	int idx = key->keyidx;

	/* fall back to sw encryption for unsupported ciphers */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/*
	 * The hardware does not support per-STA RX GTK, fall back
	 * to software mode for these.
	 */
	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	if (cmd == SET_KEY) {
		key->hw_key_idx = wcid->idx;
		wcid->hw_key_idx = idx;
	} else {
		if (idx == wcid->hw_key_idx)
			wcid->hw_key_idx = -1;

		key = NULL;
	}
	mt76_wcid_key_setup(&dev->mt76, wcid, key);

	return mt7603_wtbl_set_key(dev, wcid->idx, key);
}

static int
mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct mt7603_dev *dev = hw->priv;
	u16 cw_min = (1 << 5) - 1;
	u16 cw_max = (1 << 10) - 1;
	u32 val;

	queue = dev->mt76.q_tx[queue].hw_idx;

	if (params->cw_min)
		cw_min = params->cw_min;
	if (params->cw_max)
		cw_max = params->cw_max;

	mutex_lock(&dev->mt76.mutex);
	mt7603_mac_stop(dev);

	val = mt76_rr(dev, MT_WMM_TXOP(queue));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
	val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
	mt76_wr(dev, MT_WMM_TXOP(queue), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX(queue));
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
	mt76_wr(dev, MT_WMM_CWMAX(queue), val);

	mt7603_mac_start(dev);
	mutex_unlock(&dev->mt76.mutex);

	return 0;
}

static void
mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       const u8 *mac)
{
	struct mt7603_dev *dev = hw->priv;

	set_bit(MT76_SCANNING, &dev->mt76.state);
	mt7603_beacon_set_timer(dev, -1, 0);
}

static void
mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_dev *dev = hw->priv;

	clear_bit(MT76_SCANNING, &dev->mt76.state);
	mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
}

static void
mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	     u32 queues, bool drop)
{
}

static int
mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    struct ieee80211_ampdu_params *params)
{
	enum ieee80211_ampdu_mlme_action action = params->action;
	struct mt7603_dev *dev = hw->priv;
	struct ieee80211_sta *sta = params->sta;
	struct ieee80211_txq *txq = sta->txq[params->tid];
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	u8 ba_size = params->buf_size;
	struct mt76_txq *mtxq;

	if (!txq)
		return -EINVAL;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
				   params->buf_size);
		mt7603_mac_rx_ba_reset(dev, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		mtxq->aggr = true;
		mtxq->send_bar = false;
		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		mtxq->aggr = false;
		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
		break;
	case IEEE80211_AMPDU_TX_START:
		mtxq->agg_ssn = *ssn << 4;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		mtxq->aggr = false;
		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	}

	return 0;
}

static void
mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
	int i;

	spin_lock_bh(&dev->mt76.lock);
	for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
		msta->rates[i].idx = sta_rates->rate[i].idx;
		msta->rates[i].count = sta_rates->rate[i].count;
		msta->rates[i].flags = sta_rates->rate[i].flags;

		if (msta->rates[i].idx < 0 || !msta->rates[i].count)
			break;
	}
	msta->n_rates = i;
	mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
	msta->rate_probe = false;
	mt7603_wtbl_set_smps(dev, msta,
			     sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
	spin_unlock_bh(&dev->mt76.lock);
}

static void
mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
	struct mt7603_dev *dev = hw->priv;

	dev->coverage_class = coverage_class;
	mt7603_mac_set_timing(dev);
}

static void
mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
	  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7603_dev *dev = hw->priv;
	struct mt76_wcid *wcid = &dev->global_sta.wcid;

	if (control->sta) {
		struct mt7603_sta *msta;

		msta = (struct mt7603_sta *)control->sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif) {
		struct mt7603_vif *mvif;

		mvif = (struct mt7603_vif *)vif->drv_priv;
		wcid = &mvif->sta.wcid;
	}

	mt76_tx(&dev->mt76, control->sta, wcid, skb);
}

static int
mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}

const struct ieee80211_ops mt7603_ops = {
	.tx = mt7603_tx,
	.start = mt7603_start,
	.stop = mt7603_stop,
	.add_interface = mt7603_add_interface,
	.remove_interface = mt7603_remove_interface,
	.config = mt7603_config,
	.configure_filter = mt7603_configure_filter,
	.bss_info_changed = mt7603_bss_info_changed,
	.sta_state = mt76_sta_state,
	.set_key = mt7603_set_key,
	.conf_tx = mt7603_conf_tx,
	.sw_scan_start = mt7603_sw_scan,
	.sw_scan_complete = mt7603_sw_scan_complete,
	.flush = mt7603_flush,
	.ampdu_action = mt7603_ampdu_action,
	.get_txpower = mt76_get_txpower,
	.wake_tx_queue = mt76_wake_tx_queue,
	.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
	.release_buffered_frames = mt7603_release_buffered_frames,
	.set_coverage_class = mt7603_set_coverage_class,
	.set_tim = mt7603_set_tim,
	.get_survey = mt76_get_survey,
};

MODULE_LICENSE("Dual BSD/GPL");
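
/*
 * Register the SoC (platform) front end unconditionally; the PCIe front
 * end is only registered when CONFIG_PCI is enabled.
 */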
static int __init mt7603_init(void)
{
	int ret;

	ret = platform_driver_register(&mt76_wmac_driver);
	if (ret)
		return ret;

#ifdef CONFIG_PCI
	ret = pci_register_driver(&mt7603_pci_driver);
	if (ret)
		platform_driver_unregister(&mt76_wmac_driver);
#endif
	return ret;
}

static void __exit mt7603_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mt7603_pci_driver);
#endif
	platform_driver_unregister(&mt76_wmac_driver);
}

module_init(mt7603_init);
module_exit(mt7603_exit);