// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 */

#include "mt76x02.h"
#include "mt76x02_trace.h"

void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
	int i;

	mt76_rr(dev, MT_RX_STAT_0);
	mt76_rr(dev, MT_RX_STAT_1);
	mt76_rr(dev, MT_RX_STAT_2);
	mt76_rr(dev, MT_TX_STA_0);
	mt76_rr(dev, MT_TX_STA_1);
	mt76_rr(dev, MT_TX_STA_2);

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_STAT_FIFO);

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);

static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
				 u8 key_idx, struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);

void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
			      struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u32 iv, eiv;
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	iv = mt76_rr(dev, MT_WCID_IV(idx));
	eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);

	pn = (u64)eiv << 16;
	if (cipher == MT_CIPHER_TKIP) {
		pn |= (iv >> 16) & 0xff;
		pn |= (iv & 0xff) << 8;
	} else if (cipher >= MT_CIPHER_AES_CCMP) {
		pn |= iv & 0xffff;
	} else {
		return;
	}

	atomic64_set(&key->tx_pn, pn);
}

int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
			     struct ieee80211_key_conf *key)
{
	enum mt76x02_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];
	u64 pn;

	cipher = mt76x02_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));

		pn = atomic64_read(&key->tx_pn);

		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP) {
			iv_data[3] |= 0x20;
			put_unaligned_le32(pn >> 16, &iv_data[4]);
		}

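		/*
		 * TKIP lays the WEP IV out as TSC1, WEPSeed[1], TSC0, where
		 * WEPSeed[1] is TSC1 with bit 5 set and bit 7 cleared,
		 * i.e. (TSC1 | 0x20) & 0x7f, per the TKIP key mixing rules.
		 */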
		if (cipher == MT_CIPHER_TKIP) {
			iv_data[0] = (pn >> 8) & 0xff;
			iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
			iv_data[2] = pn & 0xff;
		} else if (cipher >= MT_CIPHER_AES_CCMP) {
			put_unaligned_le16((pn & 0xffff), &iv_data[0]);
		}
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}

void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
			    u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);

void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}

static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
			const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u8 phy, rate_idx, nss, bw = 0;
	u16 rateval;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		nss = 1;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}

void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
			       const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	__le16 rateval;
	u32 tx_info;
	s8 nss;

	rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
	tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
		  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
		  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
		  MT_WCID_TX_INFO_SET;
	wcid->tx_info = tx_info;
}

void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
{
	if (enable)
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
	else
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}

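/*
 * Note the read order below: reading MT_TX_STAT_FIFO pops the entry, so the
 * extension word with retry count and packet ID is presumably latched per
 * entry and has to be fetched first.
 */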
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
				struct mt76x02_tx_status *stat)
{
	u32 stat1, stat2;

	stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
	stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);

	stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
	if (!stat->valid)
		return false;

	stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
	stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
	stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
	stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
	stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);

	stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
	stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);

	trace_mac_txstat_fetch(dev, stat);

	return true;
}

static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			    enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}

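/*
 * Fill the hardware TX descriptor (TXWI) for one frame: pick either the rate
 * selected by rate control or the per-WCID rate programmed via
 * mt76x02_mac_wcid_set_rate(), then apply power, stream, ACK and A-MPDU
 * policy bits.
 */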
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta, int len)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	u32 wcid_tx_info;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;

	memset(txwi, 0, sizeof(*txwi));

	if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
	    ieee80211_has_protected(hdr->frame_control)) {
		wcid = NULL;
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);
	}

	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	if (wcid && (rate->idx < 0 || !rate->count)) {
		wcid_tx_info = wcid->tx_info;
		txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
		max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
					  wcid_tx_info);
		nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
	} else {
		txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
	}

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
					     max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			      FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					 sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);

static void
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
	u8 mcs, nss;

	if (!idx)
		return;

	rates += idx - 1;
	rates[1] = rates[0];
	switch (phy) {
	case MT_PHY_TYPE_VHT:
		mcs = ieee80211_rate_get_vht_mcs(rates);
		nss = ieee80211_rate_get_vht_nss(rates);

		if (mcs == 0)
			nss = max_t(int, nss - 1, 1);
		else
			mcs--;

		ieee80211_rate_set_vht(rates + 1, mcs, nss);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		/* MCS 8 falls back to MCS 0 */
		if (rates[0].idx == 8) {
			rates[1].idx = 0;
			break;
		}
		/* fall through */
	default:
		rates[1].idx = max_t(int, rates[0].idx - 1, 0);
		break;
	}
}

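/*
 * Rebuild the rate chain for mac80211 from a single status FIFO entry: the
 * first rate comes from the packet ID (when it carries one) or the cached
 * WCID rate, the last rate is the one the hardware reported, and the slots
 * in between replay the hardware fallback behaviour.
 */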
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
			   struct ieee80211_tx_info *info,
			   struct mt76x02_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	struct ieee80211_tx_rate last_rate;
	u16 first_rate;
	int retry = st->retry;
	int phy;
	int i;

	if (!n_frames)
		return;

	phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);

	if (st->pktid & MT_PACKET_ID_HAS_RATE) {
		first_rate = st->rate & ~MT_PKTID_RATE;
		first_rate |= st->pktid & MT_PKTID_RATE;

		mt76x02_mac_process_tx_rate(&rate[0], first_rate,
					    dev->mt76.chandef.chan->band);
	} else if (rate[0].idx < 0) {
		if (!msta)
			return;

		mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
					    dev->mt76.chandef.chan->band);
	}

	mt76x02_mac_process_tx_rate(&last_rate, st->rate,
				    dev->mt76.chandef.chan->band);

	for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		retry--;
		if (i + 1 == ARRAY_SIZE(info->status.rates)) {
			info->status.rates[i] = last_rate;
			info->status.rates[i].count = max_t(int, retry, 1);
			break;
		}

		mt76x02_tx_rate_fallback(info->status.rates, i, phy);
		if (info->status.rates[i].idx == last_rate.idx)
			break;
	}

	if (i + 1 < ARRAY_SIZE(info->status.rates)) {
		info->status.rates[i + 1].idx = -1;
		info->status.rates[i + 1].count = 0;
	}

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}

void mt76x02_send_tx_status(struct mt76x02_dev *dev,
			    struct mt76x02_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_tx_status status = {
		.info = &info
	};
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt76_wcid *wcid = NULL;
	struct mt76x02_sta *msta = NULL;
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	u32 duration = 0;
	u8 cur_pktid;
	u32 ac = 0;
	int len = 0;

	if (stat->pktid == MT_PACKET_ID_NO_ACK)
		return;

	rcu_read_lock();

	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);

	if (wcid && wcid->sta) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		status.sta = container_of(priv, struct ieee80211_sta,
					  drv_priv);
	}

	mt76_tx_status_lock(mdev, &list);

	if (wcid) {
		if (mt76_is_skb_pktid(stat->pktid))
			status.skb = mt76_tx_status_skb_get(mdev, wcid,
							    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}

	if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
		mt76_tx_status_unlock(mdev, &list);
		goto out;
	}

	if (msta && stat->aggr && !status.skb) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32)stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32)msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			mt76_tx_status_unlock(mdev, &list);
			goto out;
		}

		cur_pktid = msta->status.pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info,
					   &msta->status, msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		cur_pktid = stat->pktid;
		mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
		*update = 1;
	}

	if (status.skb) {
		info = *status.info;
		len = status.skb->len;
		ac = skb_get_queue_mapping(status.skb);
		mt76_tx_status_skb_done(mdev, status.skb, &list);
	} else if (msta) {
		len = status.info->status.ampdu_len *
		      ewma_pktlen_read(&msta->pktlen);
		ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
	}

	mt76_tx_status_unlock(mdev, &list);

	if (!status.skb)
		ieee80211_tx_status_ext(mt76_hw(dev), &status);

	if (!len)
		goto out;

	duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->tx_airtime += duration;
	spin_unlock_bh(&dev->mt76.cc_lock);

	if (msta)
		ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac],
					       duration, 0);

out:
	rcu_read_unlock();
}

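/*
 * Decode the RXWI rate field into mac80211 RX status. For VHT the index
 * subfield packs MCS and NSS - 1 together, hence the extra unpacking below;
 * NSS is additionally clamped to the number of receive chains.
 */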
static int
mt76x02_mac_process_rate(struct mt76x02_dev *dev,
			 struct mt76_rx_status *status,
			 u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT: {
		u8 n_rxstream = dev->mt76.chainmask & 0xf;

		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = min_t(u8, n_rxstream,
				    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
		break;
	}
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}

void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
	static const u8 null_addr[ETH_ALEN] = {};
	int i;

	ether_addr_copy(dev->mt76.macaddr, addr);

	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
		eth_random_addr(dev->mt76.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mt76.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	mt76_wr(dev, MT_MAC_BSSID_DW0,
		get_unaligned_le32(dev->mt76.macaddr));
	mt76_wr(dev, MT_MAC_BSSID_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);

	for (i = 0; i < 16; i++)
		mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);

static int
mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
	struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;

	rssi += cal->rssi_offset[chain];
	rssi -= cal->lna_gain;

	return rssi;
}

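/*
 * Parse one received frame: strip the L2 pad and, for unfragmented frames,
 * the security IV left in place by the hardware, tag A-MPDU subframes with
 * a shared reference and convert per-chain RSSI into signal values.
 */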
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
			   void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76x02_rxwi *rxwi = rxi;
	struct mt76x02_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
	s8 signal;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
	status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x02_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (rxinfo & MT_RXINFO_AMPDU) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;
		status->ampdu_ref = dev->mt76.ampdu_ref;

		/*
		 * When receiving an A-MPDU subframe and RSSI info is not
		 * valid, we can assume that more subframes belonging to the
		 * same A-MPDU are coming. The last one will have valid RSSI
		 * info.
		 */
		if (rxinfo & MT_RXINFO_RSSI) {
			if (!++dev->mt76.ampdu_ref)
				dev->mt76.ampdu_ref++;
		}
	}

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);

	status->chains = BIT(0);
	signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[0] = signal;
	if (nstreams > 1) {
		status->chains |= BIT(1);
		status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
							       rxwi->rssi[1],
							       1);
		signal = max_t(s8, signal, status->chain_signal[1]);
	}
	status->signal = signal;
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x02_mac_process_rate(dev, status, rate);
}

void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
	struct mt76x02_tx_status stat = {};
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		if (!spin_trylock(&dev->txstatus_fifo_lock))
			break;

		ret = mt76x02_mac_load_tx_status(dev, &stat);
		spin_unlock(&dev->txstatus_fifo_lock);

		if (!ret)
			break;

		if (!irq) {
			mt76x02_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}

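/*
 * Complete one TX queue entry. The status FIFO is also polled here (non-IRQ
 * path) so rate and retry statistics keep flowing between status interrupts.
 */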
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			     struct mt76_queue_entry *e)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_txwi *txwi;
	u8 *txwi_ptr;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	mt76x02_mac_poll_tx_status(dev, false);

	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
	txwi = (struct mt76x02_txwi *)txwi_ptr;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);

	mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);

void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}

void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
				   int ht_mode)
{
	int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
	u32 prot[6];
	u32 vht_prot[3];
	int i;
	u16 rts_thr;

	for (i = 0; i < ARRAY_SIZE(prot); i++) {
		prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
		prot[i] &= ~MT_PROT_CFG_CTRL;
		if (i >= 2)
			prot[i] &= ~MT_PROT_CFG_RATE;
	}

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
		vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
		vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
	}

	rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);

	if (rts_thr != 0xffff)
		prot[0] |= MT_PROT_CTRL_RTS_CTS;

	if (legacy_prot) {
		prot[1] |= MT_PROT_CTRL_CTS2SELF;

		prot[2] |= MT_PROT_RATE_CCK_11;
		prot[3] |= MT_PROT_RATE_CCK_11;
		prot[4] |= MT_PROT_RATE_CCK_11;
		prot[5] |= MT_PROT_RATE_CCK_11;

		vht_prot[0] |= MT_PROT_RATE_CCK_11;
		vht_prot[1] |= MT_PROT_RATE_CCK_11;
		vht_prot[2] |= MT_PROT_RATE_CCK_11;
	} else {
		if (rts_thr != 0xffff)
			prot[1] |= MT_PROT_CTRL_RTS_CTS;

		prot[2] |= MT_PROT_RATE_OFDM_24;
		prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
		prot[4] |= MT_PROT_RATE_OFDM_24;
		prot[5] |= MT_PROT_RATE_DUP_OFDM_24;

		vht_prot[0] |= MT_PROT_RATE_OFDM_24;
		vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
		vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
	}

	switch (mode) {
	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
		prot[2] |= MT_PROT_CTRL_RTS_CTS;
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
		prot[3] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
		vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
		break;
	}

	if (non_gf) {
		prot[4] |= MT_PROT_CTRL_RTS_CTS;
		prot[5] |= MT_PROT_CTRL_RTS_CTS;
	}

	for (i = 0; i < ARRAY_SIZE(prot); i++)
		mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);

	for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
		mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}

void mt76x02_update_channel(struct mt76_dev *mdev)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->chan_state;
	state->cc_busy += mt76_rr(dev, MT_CH_BUSY);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_tx += dev->tx_airtime;
	dev->tx_airtime = 0;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
EXPORT_SYMBOL_GPL(mt76x02_update_channel);

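/*
 * 0x10f4 is an undocumented debug register; the bits checked below appear to
 * flag a stuck MAC, which is recovered with a CSR reset.
 */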
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, 0x10f4);

	if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
		return;

	dev_err(dev->mt76.dev, "mac specific condition occurred\n");

	mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
	udelay(10);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
}

static void
mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
{
	if (enable) {
		u32 data;

		mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* enable pa-lna */
		data = mt76_rr(dev, MT_TX_PIN_CFG);
		data |= MT_TX_PIN_CFG_TXANT |
			MT_TX_PIN_CFG_RXANT |
			MT_TX_PIN_RFTR_EN |
			MT_TX_PIN_TRSW_EN;
		mt76_wr(dev, MT_TX_PIN_CFG, data);
	} else {
		mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
		mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
		/* disable pa-lna */
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
		mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
	}
	dev->ed_tx_blocked = !enable;
}

void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
	dev->ed_trigger = 0;
	dev->ed_silent = 0;

	if (dev->ed_monitor) {
		struct ieee80211_channel *chan = dev->mt76.chandef.chan;
		u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;

		mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
			 ed_th << 8 | ed_th);
		mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
	} else {
		mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
		mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
		if (is_mt76x2(dev)) {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
			mt76_set(dev, MT_TXOP_HLDR_ET,
				 MT_TXOP_HLDR_TX40M_BLK_EN);
		} else {
			mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
			mt76_clear(dev, MT_TXOP_HLDR_ET,
				   MT_TXOP_HLDR_TX40M_BLK_EN);
		}
	}
	mt76x02_edcca_tx_enable(dev, true);
	dev->ed_monitor_learning = true;

	/* clear previous CCA timer value */
	mt76_rr(dev, MT_ED_CCA_TIMER);
	dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);

#define MT_EDCCA_TH		92
#define MT_EDCCA_BLOCK_TH	2
#define MT_EDCCA_LEARN_TH	50
#define MT_EDCCA_LEARN_CCA	180
#define MT_EDCCA_LEARN_TIMEOUT	(20 * HZ)

static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
	ktime_t cur_time;
	u32 active, val, busy;

	cur_time = ktime_get_boottime();
	val = mt76_rr(dev, MT_ED_CCA_TIMER);

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	busy = (val * 100) / active;
	busy = min_t(u32, busy, 100);

	if (busy > MT_EDCCA_TH) {
		dev->ed_trigger++;
		dev->ed_silent = 0;
	} else {
		dev->ed_silent++;
		dev->ed_trigger = 0;
	}

	if (dev->cal.agc_lowest_gain &&
	    dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
	    dev->ed_trigger > MT_EDCCA_LEARN_TH) {
		dev->ed_monitor_learning = false;
		dev->ed_trigger_timeout = jiffies + MT_EDCCA_LEARN_TIMEOUT;
	} else if (!dev->ed_monitor_learning &&
		   time_is_after_jiffies(dev->ed_trigger_timeout)) {
		dev->ed_monitor_learning = true;
		mt76x02_edcca_tx_enable(dev, true);
	}

	if (dev->ed_monitor_learning)
		return;

	if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, false);
	else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
		mt76x02_edcca_tx_enable(dev, true);
}

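/*
 * Periodic housekeeping: update survey data, fold the hardware aggregation
 * counters into aggr_stats, check for MAC errors while beaconing is disabled
 * and re-evaluate the energy-detect CCA state.
 */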
void mt76x02_mac_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       mt76.mac_work.work);
	int i, idx;

	mutex_lock(&dev->mt76.mutex);

	mt76_update_survey(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (!dev->mt76.beacon_mask)
		mt76x02_check_mac_err(dev);

	if (dev->ed_monitor)
		mt76x02_edcca_check(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, NULL, false);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
}

void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
	dev->mt76.survey_time = ktime_get_boottime();

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	/* channel cycle counters read-and-clear */
	mt76_rr(dev, MT_CH_BUSY);
	mt76_rr(dev, MT_CH_IDLE);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);

void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}