1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2020 MediaTek Inc. */ 3 4 #include <linux/etherdevice.h> 5 #include <linux/timekeeping.h> 6 #include "coredump.h" 7 #include "mt7915.h" 8 #include "../dma.h" 9 #include "mac.h" 10 #include "mcu.h" 11 12 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) 13 14 static const struct mt7915_dfs_radar_spec etsi_radar_specs = { 15 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 16 .radar_pattern = { 17 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 18 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 19 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 20 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 21 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 22 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 23 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 24 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 25 }, 26 }; 27 28 static const struct mt7915_dfs_radar_spec fcc_radar_specs = { 29 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 30 .radar_pattern = { 31 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 32 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 33 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 34 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 35 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 36 }, 37 }; 38 39 static const struct mt7915_dfs_radar_spec jp_radar_specs = { 40 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 41 .radar_pattern = { 42 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 43 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 44 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 45 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 46 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 47 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 48 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 49 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, 50 }, 51 }; 52 53 static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev, 54 u16 idx, bool unicast) 55 { 56 struct mt7915_sta *sta; 57 struct mt76_wcid *wcid; 58 59 if (idx >= ARRAY_SIZE(dev->mt76.wcid)) 60 return NULL; 61 62 wcid = rcu_dereference(dev->mt76.wcid[idx]); 63 if (unicast || !wcid) 64 return wcid; 65 66 if (!wcid->sta) 67 return NULL; 68 69 sta = container_of(wcid, struct mt7915_sta, wcid); 70 if (!sta->vif) 71 return NULL; 72 73 return &sta->vif->sta.wcid; 74 } 75 76 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask) 77 { 78 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 79 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 80 81 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 82 0, 5000); 83 } 84 85 u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw) 86 { 87 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 88 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 89 90 return MT_WTBL_LMAC_OFFS(wcid, dw); 91 } 92 93 static void mt7915_mac_sta_poll(struct mt7915_dev *dev) 94 { 95 static const u8 ac_to_tid[] = { 96 [IEEE80211_AC_BE] = 0, 97 [IEEE80211_AC_BK] = 1, 98 [IEEE80211_AC_VI] = 4, 99 [IEEE80211_AC_VO] = 6 100 }; 101 struct ieee80211_sta *sta; 102 struct mt7915_sta *msta; 103 struct rate_info *rate; 104 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 105 LIST_HEAD(sta_poll_list); 106 int i; 107 108 spin_lock_bh(&dev->sta_poll_lock); 109 list_splice_init(&dev->sta_poll_list, &sta_poll_list); 110 spin_unlock_bh(&dev->sta_poll_lock); 111 112 rcu_read_lock(); 113 114 while (true) { 115 bool clear = false; 116 u32 addr, val; 117 u16 idx; 118 s8 rssi[4]; 119 u8 bw; 120 121 spin_lock_bh(&dev->sta_poll_lock); 122 if (list_empty(&sta_poll_list)) { 123 spin_unlock_bh(&dev->sta_poll_lock); 124 break; 125 } 126 msta = list_first_entry(&sta_poll_list, 127 struct mt7915_sta, poll_list); 128 list_del_init(&msta->poll_list); 129 spin_unlock_bh(&dev->sta_poll_lock); 130 131 idx = msta->wcid.idx; 132 133 /* refresh peer's airtime reporting */ 134 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20); 135 136 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 137 u32 tx_last = msta->airtime_ac[i]; 138 u32 rx_last = msta->airtime_ac[i + 4]; 139 140 msta->airtime_ac[i] = mt76_rr(dev, addr); 141 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 142 143 tx_time[i] = msta->airtime_ac[i] - tx_last; 144 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 145 146 if ((tx_last | rx_last) & BIT(30)) 147 clear = true; 148 149 addr += 8; 150 } 151 152 if (clear) { 153 mt7915_mac_wtbl_update(dev, idx, 154 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 155 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 156 } 157 158 if (!msta->wcid.sta) 159 continue; 160 161 sta = container_of((void *)msta, struct ieee80211_sta, 162 drv_priv); 163 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 164 u8 queue = mt76_connac_lmac_mapping(i); 165 u32 tx_cur = tx_time[queue]; 166 u32 rx_cur = rx_time[queue]; 167 u8 tid = ac_to_tid[i]; 168 169 if (!tx_cur && !rx_cur) 170 continue; 171 172 ieee80211_sta_register_airtime(sta, tid, tx_cur, 173 rx_cur); 174 } 175 176 /* 177 * We don't support reading GI info from txs packets. 178 * For accurate tx status reporting and AQL improvement, 179 * we need to make sure that flags match so polling GI 180 * from per-sta counters directly. 181 */ 182 rate = &msta->wcid.rate; 183 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7); 184 val = mt76_rr(dev, addr); 185 186 switch (rate->bw) { 187 case RATE_INFO_BW_160: 188 bw = IEEE80211_STA_RX_BW_160; 189 break; 190 case RATE_INFO_BW_80: 191 bw = IEEE80211_STA_RX_BW_80; 192 break; 193 case RATE_INFO_BW_40: 194 bw = IEEE80211_STA_RX_BW_40; 195 break; 196 default: 197 bw = IEEE80211_STA_RX_BW_20; 198 break; 199 } 200 201 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { 202 u8 offs = 24 + 2 * bw; 203 204 rate->he_gi = (val & (0x3 << offs)) >> offs; 205 } else if (rate->flags & 206 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { 207 if (val & BIT(12 + bw)) 208 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 209 else 210 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; 211 } 212 213 /* get signal strength of resp frames (CTS/BA/ACK) */ 214 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30); 215 val = mt76_rr(dev, addr); 216 217 rssi[0] = to_rssi(GENMASK(7, 0), val); 218 rssi[1] = to_rssi(GENMASK(15, 8), val); 219 rssi[2] = to_rssi(GENMASK(23, 16), val); 220 rssi[3] = to_rssi(GENMASK(31, 14), val); 221 222 msta->ack_signal = 223 mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); 224 225 ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); 226 } 227 228 rcu_read_unlock(); 229 } 230 231 void mt7915_mac_enable_rtscts(struct mt7915_dev *dev, 232 struct ieee80211_vif *vif, bool enable) 233 { 234 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 235 u32 addr; 236 237 addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5); 238 if (enable) 239 mt76_set(dev, addr, BIT(5)); 240 else 241 mt76_clear(dev, addr, BIT(5)); 242 } 243 244 static void 245 mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q, 246 struct mt7915_sta *msta, struct sk_buff *skb, 247 u32 info) 248 { 249 struct ieee80211_vif *vif; 250 struct wireless_dev *wdev; 251 252 if (!msta || !msta->vif) 253 return; 254 255 if (!mt76_queue_is_wed_rx(q)) 256 return; 257 258 if (!(info & MT_DMA_INFO_PPE_VLD)) 259 return; 260 261 vif = container_of((void *)msta->vif, struct ieee80211_vif, 262 drv_priv); 263 wdev = ieee80211_vif_to_wdev(vif); 264 skb->dev = wdev->netdev; 265 266 mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb, 267 FIELD_GET(MT_DMA_PPE_CPU_REASON, info), 268 FIELD_GET(MT_DMA_PPE_ENTRY, info)); 269 } 270 271 static int 272 mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb, 273 enum mt76_rxq_id q, u32 *info) 274 { 275 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 276 struct mt76_phy *mphy = &dev->mt76.phy; 277 struct mt7915_phy *phy = &dev->phy; 278 struct ieee80211_supported_band *sband; 279 __le32 *rxd = (__le32 *)skb->data; 280 __le32 *rxv = NULL; 281 u32 rxd0 = le32_to_cpu(rxd[0]); 282 u32 rxd1 = le32_to_cpu(rxd[1]); 283 u32 rxd2 = le32_to_cpu(rxd[2]); 284 u32 rxd3 = le32_to_cpu(rxd[3]); 285 u32 rxd4 = le32_to_cpu(rxd[4]); 286 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 287 bool unicast, insert_ccmp_hdr = false; 288 u8 remove_pad, amsdu_info; 289 u8 mode = 0, qos_ctl = 0; 290 struct mt7915_sta *msta = NULL; 291 u32 csum_status = *(u32 *)skb->cb; 292 bool hdr_trans; 293 u16 hdr_gap; 294 u16 seq_ctrl = 0; 295 __le16 fc = 0; 296 int idx; 297 298 memset(status, 0, sizeof(*status)); 299 300 if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) { 301 mphy = dev->mt76.phys[MT_BAND1]; 302 if (!mphy) 303 return -EINVAL; 304 305 phy = mphy->priv; 306 status->phy_idx = 1; 307 } 308 309 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 310 return -EINVAL; 311 312 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 313 return -EINVAL; 314 315 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; 316 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) 317 return -EINVAL; 318 319 /* ICV error or CCMP/BIP/WPI MIC error */ 320 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) 321 status->flag |= RX_FLAG_ONLY_MONITOR; 322 323 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; 324 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); 325 status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); 326 327 if (status->wcid) { 328 msta = container_of(status->wcid, struct mt7915_sta, wcid); 329 spin_lock_bh(&dev->sta_poll_lock); 330 if (list_empty(&msta->poll_list)) 331 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 332 spin_unlock_bh(&dev->sta_poll_lock); 333 } 334 335 status->freq = mphy->chandef.chan->center_freq; 336 status->band = mphy->chandef.chan->band; 337 if (status->band == NL80211_BAND_5GHZ) 338 sband = &mphy->sband_5g.sband; 339 else if (status->band == NL80211_BAND_6GHZ) 340 sband = &mphy->sband_6g.sband; 341 else 342 sband = &mphy->sband_2g.sband; 343 344 if (!sband->channels) 345 return -EINVAL; 346 347 if ((rxd0 & csum_mask) == csum_mask && 348 !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) 349 skb->ip_summed = CHECKSUM_UNNECESSARY; 350 351 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) 352 status->flag |= RX_FLAG_FAILED_FCS_CRC; 353 354 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) 355 status->flag |= RX_FLAG_MMIC_ERROR; 356 357 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 && 358 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { 359 status->flag |= RX_FLAG_DECRYPTED; 360 status->flag |= RX_FLAG_IV_STRIPPED; 361 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 362 } 363 364 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); 365 366 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 367 return -EINVAL; 368 369 rxd += 6; 370 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { 371 u32 v0 = le32_to_cpu(rxd[0]); 372 u32 v2 = le32_to_cpu(rxd[2]); 373 374 fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0)); 375 qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2); 376 seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2); 377 378 rxd += 4; 379 if ((u8 *)rxd - skb->data >= skb->len) 380 return -EINVAL; 381 } 382 383 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { 384 u8 *data = (u8 *)rxd; 385 386 if (status->flag & RX_FLAG_DECRYPTED) { 387 switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) { 388 case MT_CIPHER_AES_CCMP: 389 case MT_CIPHER_CCMP_CCX: 390 case MT_CIPHER_CCMP_256: 391 insert_ccmp_hdr = 392 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 393 fallthrough; 394 case MT_CIPHER_TKIP: 395 case MT_CIPHER_TKIP_NO_MIC: 396 case MT_CIPHER_GCMP: 397 case MT_CIPHER_GCMP_256: 398 status->iv[0] = data[5]; 399 status->iv[1] = data[4]; 400 status->iv[2] = data[3]; 401 status->iv[3] = data[2]; 402 status->iv[4] = data[1]; 403 status->iv[5] = data[0]; 404 break; 405 default: 406 break; 407 } 408 } 409 rxd += 4; 410 if ((u8 *)rxd - skb->data >= skb->len) 411 return -EINVAL; 412 } 413 414 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { 415 status->timestamp = le32_to_cpu(rxd[0]); 416 status->flag |= RX_FLAG_MACTIME_START; 417 418 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { 419 status->flag |= RX_FLAG_AMPDU_DETAILS; 420 421 /* all subframes of an A-MPDU have the same timestamp */ 422 if (phy->rx_ampdu_ts != status->timestamp) { 423 if (!++phy->ampdu_ref) 424 phy->ampdu_ref++; 425 } 426 phy->rx_ampdu_ts = status->timestamp; 427 428 status->ampdu_ref = phy->ampdu_ref; 429 } 430 431 rxd += 2; 432 if ((u8 *)rxd - skb->data >= skb->len) 433 return -EINVAL; 434 } 435 436 /* RXD Group 3 - P-RXV */ 437 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 438 u32 v0, v1; 439 int ret; 440 441 rxv = rxd; 442 rxd += 2; 443 if ((u8 *)rxd - skb->data >= skb->len) 444 return -EINVAL; 445 446 v0 = le32_to_cpu(rxv[0]); 447 v1 = le32_to_cpu(rxv[1]); 448 449 if (v0 & MT_PRXV_HT_AD_CODE) 450 status->enc_flags |= RX_ENC_FLAG_LDPC; 451 452 status->chains = mphy->antenna_mask; 453 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1); 454 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1); 455 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1); 456 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1); 457 458 /* RXD Group 5 - C-RXV */ 459 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { 460 rxd += 18; 461 if ((u8 *)rxd - skb->data >= skb->len) 462 return -EINVAL; 463 } 464 465 if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) { 466 ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status, 467 sband, rxv, &mode); 468 if (ret < 0) 469 return ret; 470 } 471 } 472 473 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); 474 status->amsdu = !!amsdu_info; 475 if (status->amsdu) { 476 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; 477 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; 478 } 479 480 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 481 if (hdr_trans && ieee80211_has_morefrags(fc)) { 482 struct ieee80211_vif *vif; 483 int err; 484 485 if (!msta || !msta->vif) 486 return -EINVAL; 487 488 vif = container_of((void *)msta->vif, struct ieee80211_vif, 489 drv_priv); 490 err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap); 491 if (err) 492 return err; 493 494 hdr_trans = false; 495 } else { 496 int pad_start = 0; 497 498 skb_pull(skb, hdr_gap); 499 if (!hdr_trans && status->amsdu) { 500 pad_start = ieee80211_get_hdrlen_from_skb(skb); 501 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 502 /* 503 * When header translation failure is indicated, 504 * the hardware will insert an extra 2-byte field 505 * containing the data length after the protocol 506 * type field. This happens either when the LLC-SNAP 507 * pattern did not match, or if a VLAN header was 508 * detected. 509 */ 510 pad_start = 12; 511 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 512 pad_start += 4; 513 else 514 pad_start = 0; 515 } 516 517 if (pad_start) { 518 memmove(skb->data + 2, skb->data, pad_start); 519 skb_pull(skb, 2); 520 } 521 } 522 523 if (!hdr_trans) { 524 struct ieee80211_hdr *hdr; 525 526 if (insert_ccmp_hdr) { 527 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 528 529 mt76_insert_ccmp_hdr(skb, key_id); 530 } 531 532 hdr = mt76_skb_get_hdr(skb); 533 fc = hdr->frame_control; 534 if (ieee80211_is_data_qos(fc)) { 535 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 536 qos_ctl = *ieee80211_get_qos_ctl(hdr); 537 } 538 } else { 539 status->flag |= RX_FLAG_8023; 540 mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb, 541 *info); 542 } 543 544 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) 545 mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode); 546 547 if (!status->wcid || !ieee80211_is_data_qos(fc)) 548 return 0; 549 550 status->aggr = unicast && 551 !ieee80211_is_qos_nullfunc(fc); 552 status->qos_ctl = qos_ctl; 553 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 554 555 return 0; 556 } 557 558 static void 559 mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) 560 { 561 #ifdef CONFIG_NL80211_TESTMODE 562 struct mt7915_phy *phy = &dev->phy; 563 __le32 *rxd = (__le32 *)skb->data; 564 __le32 *rxv_hdr = rxd + 2; 565 __le32 *rxv = rxd + 4; 566 u32 rcpi, ib_rssi, wb_rssi, v20, v21; 567 u8 band_idx; 568 s32 foe; 569 u8 snr; 570 int i; 571 572 band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX); 573 if (band_idx && !phy->mt76->band_idx) { 574 phy = mt7915_ext_phy(dev); 575 if (!phy) 576 goto out; 577 } 578 579 rcpi = le32_to_cpu(rxv[6]); 580 ib_rssi = le32_to_cpu(rxv[7]); 581 wb_rssi = le32_to_cpu(rxv[8]) >> 5; 582 583 for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) { 584 if (i == 3) 585 wb_rssi = le32_to_cpu(rxv[9]); 586 587 phy->test.last_rcpi[i] = rcpi & 0xff; 588 phy->test.last_ib_rssi[i] = ib_rssi & 0xff; 589 phy->test.last_wb_rssi[i] = wb_rssi & 0xff; 590 } 591 592 v20 = le32_to_cpu(rxv[20]); 593 v21 = le32_to_cpu(rxv[21]); 594 595 foe = FIELD_GET(MT_CRXV_FOE_LO, v20) | 596 (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT); 597 598 snr = FIELD_GET(MT_CRXV_SNR, v20) - 16; 599 600 phy->test.last_freq_offset = foe; 601 phy->test.last_snr = snr; 602 out: 603 #endif 604 dev_kfree_skb(skb); 605 } 606 607 static void 608 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi, 609 struct sk_buff *skb) 610 { 611 #ifdef CONFIG_NL80211_TESTMODE 612 struct mt76_testmode_data *td = &phy->mt76->test; 613 const struct ieee80211_rate *r; 614 u8 bw, mode, nss = td->tx_rate_nss; 615 u8 rate_idx = td->tx_rate_idx; 616 u16 rateval = 0; 617 u32 val; 618 bool cck = false; 619 int band; 620 621 if (skb != phy->mt76->test.tx_skb) 622 return; 623 624 switch (td->tx_rate_mode) { 625 case MT76_TM_TX_MODE_HT: 626 nss = 1 + (rate_idx >> 3); 627 mode = MT_PHY_TYPE_HT; 628 break; 629 case MT76_TM_TX_MODE_VHT: 630 mode = MT_PHY_TYPE_VHT; 631 break; 632 case MT76_TM_TX_MODE_HE_SU: 633 mode = MT_PHY_TYPE_HE_SU; 634 break; 635 case MT76_TM_TX_MODE_HE_EXT_SU: 636 mode = MT_PHY_TYPE_HE_EXT_SU; 637 break; 638 case MT76_TM_TX_MODE_HE_TB: 639 mode = MT_PHY_TYPE_HE_TB; 640 break; 641 case MT76_TM_TX_MODE_HE_MU: 642 mode = MT_PHY_TYPE_HE_MU; 643 break; 644 case MT76_TM_TX_MODE_CCK: 645 cck = true; 646 fallthrough; 647 case MT76_TM_TX_MODE_OFDM: 648 band = phy->mt76->chandef.chan->band; 649 if (band == NL80211_BAND_2GHZ && !cck) 650 rate_idx += 4; 651 652 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx]; 653 val = cck ? r->hw_value_short : r->hw_value; 654 655 mode = val >> 8; 656 rate_idx = val & 0xff; 657 break; 658 default: 659 mode = MT_PHY_TYPE_OFDM; 660 break; 661 } 662 663 switch (phy->mt76->chandef.width) { 664 case NL80211_CHAN_WIDTH_40: 665 bw = 1; 666 break; 667 case NL80211_CHAN_WIDTH_80: 668 bw = 2; 669 break; 670 case NL80211_CHAN_WIDTH_80P80: 671 case NL80211_CHAN_WIDTH_160: 672 bw = 3; 673 break; 674 default: 675 bw = 0; 676 break; 677 } 678 679 if (td->tx_rate_stbc && nss == 1) { 680 nss++; 681 rateval |= MT_TX_RATE_STBC; 682 } 683 684 rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 685 FIELD_PREP(MT_TX_RATE_MODE, mode) | 686 FIELD_PREP(MT_TX_RATE_NSS, nss - 1); 687 688 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 689 690 le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT); 691 if (td->tx_rate_mode < MT76_TM_TX_MODE_HT) 692 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 693 694 val = MT_TXD6_FIXED_BW | 695 FIELD_PREP(MT_TXD6_BW, bw) | 696 FIELD_PREP(MT_TXD6_TX_RATE, rateval) | 697 FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi); 698 699 /* for HE_SU/HE_EXT_SU PPDU 700 * - 1x, 2x, 4x LTF + 0.8us GI 701 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 702 * for HE_MU PPDU 703 * - 2x, 4x LTF + 0.8us GI 704 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 705 * for HE_TB PPDU 706 * - 1x, 2x LTF + 1.6us GI 707 * - 4x LTF + 3.2us GI 708 */ 709 if (mode >= MT_PHY_TYPE_HE_SU) 710 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); 711 712 if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU)) 713 val |= MT_TXD6_LDPC; 714 715 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID); 716 txwi[6] |= cpu_to_le32(val); 717 txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 718 phy->test.spe_idx)); 719 #endif 720 } 721 722 void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, 723 struct sk_buff *skb, struct mt76_wcid *wcid, int pid, 724 struct ieee80211_key_conf *key, 725 enum mt76_txq_id qid, u32 changed) 726 { 727 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 728 u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; 729 struct mt76_phy *mphy = &dev->phy; 730 731 if (phy_idx && dev->phys[MT_BAND1]) 732 mphy = dev->phys[MT_BAND1]; 733 734 mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed); 735 736 if (mt76_testmode_enabled(mphy)) 737 mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb); 738 } 739 740 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 741 enum mt76_txq_id qid, struct mt76_wcid *wcid, 742 struct ieee80211_sta *sta, 743 struct mt76_tx_info *tx_info) 744 { 745 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 746 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 747 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 748 struct ieee80211_key_conf *key = info->control.hw_key; 749 struct ieee80211_vif *vif = info->control.vif; 750 struct mt76_connac_fw_txp *txp; 751 struct mt76_txwi_cache *t; 752 int id, i, nbuf = tx_info->nbuf - 1; 753 u8 *txwi = (u8 *)txwi_ptr; 754 int pid; 755 756 if (unlikely(tx_info->skb->len <= ETH_HLEN)) 757 return -EINVAL; 758 759 if (!wcid) 760 wcid = &dev->mt76.global_wcid; 761 762 if (sta) { 763 struct mt7915_sta *msta; 764 765 msta = (struct mt7915_sta *)sta->drv_priv; 766 767 if (time_after(jiffies, msta->jiffies + HZ / 4)) { 768 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 769 msta->jiffies = jiffies; 770 } 771 } 772 773 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 774 t->skb = tx_info->skb; 775 776 id = mt76_token_consume(mdev, &t); 777 if (id < 0) 778 return id; 779 780 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); 781 mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key, 782 qid, 0); 783 784 txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE); 785 for (i = 0; i < nbuf; i++) { 786 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 787 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); 788 } 789 txp->nbuf = nbuf; 790 791 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST); 792 793 if (!key) 794 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 795 796 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 797 ieee80211_is_mgmt(hdr->frame_control)) 798 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 799 800 if (vif) { 801 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 802 803 txp->bss_idx = mvif->mt76.idx; 804 } 805 806 txp->token = cpu_to_le16(id); 807 if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) 808 txp->rept_wds_wcid = cpu_to_le16(wcid->idx); 809 else 810 txp->rept_wds_wcid = cpu_to_le16(0x3ff); 811 tx_info->skb = DMA_DUMMY_DATA; 812 813 /* pass partial skb header to fw */ 814 tx_info->buf[1].len = MT_CT_PARSE_LEN; 815 tx_info->buf[1].skip_unmap = true; 816 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 817 818 return 0; 819 } 820 821 u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) 822 { 823 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE; 824 __le32 *txwi = ptr; 825 u32 val; 826 827 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp)); 828 829 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) | 830 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT); 831 txwi[0] = cpu_to_le32(val); 832 833 val = MT_TXD1_LONG_FORMAT | 834 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3); 835 txwi[1] = cpu_to_le32(val); 836 837 txp->token = cpu_to_le16(token_id); 838 txp->nbuf = 1; 839 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp)); 840 841 return MT_TXD_SIZE + sizeof(*txp); 842 } 843 844 static void 845 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) 846 { 847 struct mt7915_sta *msta; 848 u16 fc, tid; 849 u32 val; 850 851 if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) 852 return; 853 854 tid = le32_get_bits(txwi[1], MT_TXD1_TID); 855 if (tid >= 6) /* skip VO queue */ 856 return; 857 858 val = le32_to_cpu(txwi[2]); 859 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | 860 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; 861 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) 862 return; 863 864 msta = (struct mt7915_sta *)sta->drv_priv; 865 if (!test_and_set_bit(tid, &msta->ampdu_state)) 866 ieee80211_start_tx_ba_session(sta, tid, 0); 867 } 868 869 static void 870 mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t, 871 struct ieee80211_sta *sta, struct list_head *free_list) 872 { 873 struct mt76_dev *mdev = &dev->mt76; 874 struct mt7915_sta *msta; 875 struct mt76_wcid *wcid; 876 __le32 *txwi; 877 u16 wcid_idx; 878 879 mt76_connac_txp_skb_unmap(mdev, t); 880 if (!t->skb) 881 goto out; 882 883 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); 884 if (sta) { 885 wcid = (struct mt76_wcid *)sta->drv_priv; 886 wcid_idx = wcid->idx; 887 } else { 888 wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); 889 wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]); 890 891 if (wcid && wcid->sta) { 892 msta = container_of(wcid, struct mt7915_sta, wcid); 893 sta = container_of((void *)msta, struct ieee80211_sta, 894 drv_priv); 895 spin_lock_bh(&dev->sta_poll_lock); 896 if (list_empty(&msta->poll_list)) 897 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 898 spin_unlock_bh(&dev->sta_poll_lock); 899 } 900 } 901 902 if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) 903 mt7915_tx_check_aggr(sta, txwi); 904 905 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); 906 907 out: 908 t->skb = NULL; 909 mt76_put_txwi(mdev, t); 910 } 911 912 static void 913 mt7915_mac_tx_free_prepare(struct mt7915_dev *dev) 914 { 915 struct mt76_dev *mdev = &dev->mt76; 916 struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1]; 917 918 /* clean DMA queues and unmap buffers first */ 919 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 920 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 921 if (mphy_ext) { 922 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); 923 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false); 924 } 925 } 926 927 static void 928 mt7915_mac_tx_free_done(struct mt7915_dev *dev, 929 struct list_head *free_list, bool wake) 930 { 931 struct sk_buff *skb, *tmp; 932 933 mt7915_mac_sta_poll(dev); 934 935 if (wake) 936 mt76_set_tx_blocked(&dev->mt76, false); 937 938 mt76_worker_schedule(&dev->mt76.tx_worker); 939 940 list_for_each_entry_safe(skb, tmp, free_list, list) { 941 skb_list_del_init(skb); 942 napi_consume_skb(skb, 1); 943 } 944 } 945 946 static void 947 mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) 948 { 949 struct mt76_connac_tx_free *free = data; 950 __le32 *tx_info = (__le32 *)(data + sizeof(*free)); 951 struct mt76_dev *mdev = &dev->mt76; 952 struct mt76_txwi_cache *txwi; 953 struct ieee80211_sta *sta = NULL; 954 LIST_HEAD(free_list); 955 void *end = data + len; 956 bool v3, wake = false; 957 u16 total, count = 0; 958 u32 txd = le32_to_cpu(free->txd); 959 __le32 *cur_info; 960 961 mt7915_mac_tx_free_prepare(dev); 962 963 total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT); 964 v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4); 965 966 for (cur_info = tx_info; count < total; cur_info++) { 967 u32 msdu, info; 968 u8 i; 969 970 if (WARN_ON_ONCE((void *)cur_info >= end)) 971 return; 972 973 /* 974 * 1'b1: new wcid pair. 975 * 1'b0: msdu_id with the same 'wcid pair' as above. 976 */ 977 info = le32_to_cpu(*cur_info); 978 if (info & MT_TX_FREE_PAIR) { 979 struct mt7915_sta *msta; 980 struct mt76_wcid *wcid; 981 u16 idx; 982 983 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); 984 wcid = rcu_dereference(dev->mt76.wcid[idx]); 985 sta = wcid_to_sta(wcid); 986 if (!sta) 987 continue; 988 989 msta = container_of(wcid, struct mt7915_sta, wcid); 990 spin_lock_bh(&dev->sta_poll_lock); 991 if (list_empty(&msta->poll_list)) 992 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 993 spin_unlock_bh(&dev->sta_poll_lock); 994 continue; 995 } 996 997 if (v3 && (info & MT_TX_FREE_MPDU_HEADER)) 998 continue; 999 1000 for (i = 0; i < 1 + v3; i++) { 1001 if (v3) { 1002 msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3; 1003 if (msdu == MT_TX_FREE_MSDU_ID_V3) 1004 continue; 1005 } else { 1006 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); 1007 } 1008 count++; 1009 txwi = mt76_token_release(mdev, msdu, &wake); 1010 if (!txwi) 1011 continue; 1012 1013 mt7915_txwi_free(dev, txwi, sta, &free_list); 1014 } 1015 } 1016 1017 mt7915_mac_tx_free_done(dev, &free_list, wake); 1018 } 1019 1020 static void 1021 mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len) 1022 { 1023 struct mt76_connac_tx_free *free = data; 1024 __le16 *info = (__le16 *)(data + sizeof(*free)); 1025 struct mt76_dev *mdev = &dev->mt76; 1026 void *end = data + len; 1027 LIST_HEAD(free_list); 1028 bool wake = false; 1029 u8 i, count; 1030 1031 mt7915_mac_tx_free_prepare(dev); 1032 1033 count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl)); 1034 if (WARN_ON_ONCE((void *)&info[count] > end)) 1035 return; 1036 1037 for (i = 0; i < count; i++) { 1038 struct mt76_txwi_cache *txwi; 1039 u16 msdu = le16_to_cpu(info[i]); 1040 1041 txwi = mt76_token_release(mdev, msdu, &wake); 1042 if (!txwi) 1043 continue; 1044 1045 mt7915_txwi_free(dev, txwi, NULL, &free_list); 1046 } 1047 1048 mt7915_mac_tx_free_done(dev, &free_list, wake); 1049 } 1050 1051 static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data) 1052 { 1053 struct mt7915_sta *msta = NULL; 1054 struct mt76_wcid *wcid; 1055 __le32 *txs_data = data; 1056 u16 wcidx; 1057 u8 pid; 1058 1059 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); 1060 pid = le32_get_bits(txs_data[3], MT_TXS3_PID); 1061 1062 if (pid < MT_PACKET_ID_WED) 1063 return; 1064 1065 if (wcidx >= mt7915_wtbl_size(dev)) 1066 return; 1067 1068 rcu_read_lock(); 1069 1070 wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 1071 if (!wcid) 1072 goto out; 1073 1074 msta = container_of(wcid, struct mt7915_sta, wcid); 1075 1076 if (pid == MT_PACKET_ID_WED) 1077 mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data); 1078 else 1079 mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data); 1080 1081 if (!wcid->sta) 1082 goto out; 1083 1084 spin_lock_bh(&dev->sta_poll_lock); 1085 if (list_empty(&msta->poll_list)) 1086 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 1087 spin_unlock_bh(&dev->sta_poll_lock); 1088 1089 out: 1090 rcu_read_unlock(); 1091 } 1092 1093 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len) 1094 { 1095 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1096 __le32 *rxd = (__le32 *)data; 1097 __le32 *end = (__le32 *)&rxd[len / 4]; 1098 enum rx_pkt_type type; 1099 1100 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1101 1102 switch (type) { 1103 case PKT_TYPE_TXRX_NOTIFY: 1104 mt7915_mac_tx_free(dev, data, len); 1105 return false; 1106 case PKT_TYPE_TXRX_NOTIFY_V0: 1107 mt7915_mac_tx_free_v0(dev, data, len); 1108 return false; 1109 case PKT_TYPE_TXS: 1110 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1111 mt7915_mac_add_txs(dev, rxd); 1112 return false; 1113 case PKT_TYPE_RX_FW_MONITOR: 1114 mt7915_debugfs_rx_fw_monitor(dev, data, len); 1115 return false; 1116 default: 1117 return true; 1118 } 1119 } 1120 1121 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1122 struct sk_buff *skb, u32 *info) 1123 { 1124 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1125 __le32 *rxd = (__le32 *)skb->data; 1126 __le32 *end = (__le32 *)&skb->data[skb->len]; 1127 enum rx_pkt_type type; 1128 1129 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); 1130 1131 switch (type) { 1132 case PKT_TYPE_TXRX_NOTIFY: 1133 mt7915_mac_tx_free(dev, skb->data, skb->len); 1134 napi_consume_skb(skb, 1); 1135 break; 1136 case PKT_TYPE_TXRX_NOTIFY_V0: 1137 mt7915_mac_tx_free_v0(dev, skb->data, skb->len); 1138 napi_consume_skb(skb, 1); 1139 break; 1140 case PKT_TYPE_RX_EVENT: 1141 mt7915_mcu_rx_event(dev, skb); 1142 break; 1143 case PKT_TYPE_TXRXV: 1144 mt7915_mac_fill_rx_vector(dev, skb); 1145 break; 1146 case PKT_TYPE_TXS: 1147 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1148 mt7915_mac_add_txs(dev, rxd); 1149 dev_kfree_skb(skb); 1150 break; 1151 case PKT_TYPE_RX_FW_MONITOR: 1152 mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len); 1153 dev_kfree_skb(skb); 1154 break; 1155 case PKT_TYPE_NORMAL: 1156 if (!mt7915_mac_fill_rx(dev, skb, q, info)) { 1157 mt76_rx(&dev->mt76, q, skb); 1158 return; 1159 } 1160 fallthrough; 1161 default: 1162 dev_kfree_skb(skb); 1163 break; 1164 } 1165 } 1166 1167 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy) 1168 { 1169 struct mt7915_dev *dev = phy->dev; 1170 u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx); 1171 1172 mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); 1173 mt76_set(dev, reg, BIT(11) | BIT(9)); 1174 } 1175 1176 void mt7915_mac_reset_counters(struct mt7915_phy *phy) 1177 { 1178 struct mt7915_dev *dev = phy->dev; 1179 int i; 1180 1181 for (i = 0; i < 4; i++) { 1182 mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i)); 1183 mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i)); 1184 } 1185 1186 phy->mt76->survey_time = ktime_get_boottime(); 1187 memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats)); 1188 1189 /* reset airtime counters */ 1190 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx), 1191 MT_WF_RMAC_MIB_RXTIME_CLR); 1192 1193 mt7915_mcu_get_chan_mib_info(phy, true); 1194 } 1195 1196 void mt7915_mac_set_timing(struct mt7915_phy *phy) 1197 { 1198 s16 coverage_class = phy->coverage_class; 1199 struct mt7915_dev *dev = phy->dev; 1200 struct mt7915_phy *ext_phy = mt7915_ext_phy(dev); 1201 u32 val, reg_offset; 1202 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 1203 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 1204 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 1205 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 1206 u8 band = phy->mt76->band_idx; 1207 int eifs_ofdm = 360, sifs = 10, offset; 1208 bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ); 1209 1210 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 1211 return; 1212 1213 if (ext_phy) 1214 coverage_class = max_t(s16, dev->phy.coverage_class, 1215 ext_phy->coverage_class); 1216 1217 mt76_set(dev, MT_ARB_SCR(band), 1218 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1219 udelay(1); 1220 1221 offset = 3 * coverage_class; 1222 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 1223 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 1224 1225 if (!is_mt7915(&dev->mt76)) { 1226 if (!a_band) { 1227 mt76_wr(dev, MT_TMAC_ICR1(band), 1228 FIELD_PREP(MT_IFS_EIFS_CCK, 314)); 1229 eifs_ofdm = 78; 1230 } else { 1231 eifs_ofdm = 84; 1232 } 1233 } else if (a_band) { 1234 sifs = 16; 1235 } 1236 1237 mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset); 1238 mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset); 1239 mt76_wr(dev, MT_TMAC_ICR0(band), 1240 FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) | 1241 FIELD_PREP(MT_IFS_RIFS, 2) | 1242 FIELD_PREP(MT_IFS_SIFS, sifs) | 1243 FIELD_PREP(MT_IFS_SLOT, phy->slottime)); 1244 1245 if (phy->slottime < 20 || a_band) 1246 val = MT7915_CFEND_RATE_DEFAULT; 1247 else 1248 val = MT7915_CFEND_RATE_11B; 1249 1250 mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val); 1251 mt76_clear(dev, MT_ARB_SCR(band), 1252 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1253 } 1254 1255 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band) 1256 { 1257 u32 reg; 1258 1259 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) : 1260 MT_WF_PHY_RXTD12_MT7916(band); 1261 mt76_set(dev, reg, 1262 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | 1263 MT_WF_PHY_RXTD12_IRPI_SW_CLR); 1264 1265 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) : 1266 MT_WF_PHY_RX_CTRL1_MT7916(band); 1267 mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); 1268 } 1269 1270 static u8 1271 mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) 1272 { 1273 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1274 struct mt7915_dev *dev = phy->dev; 1275 u32 val, sum = 0, n = 0; 1276 int nss, i; 1277 1278 for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) { 1279 u32 reg = is_mt7915(&dev->mt76) ? 1280 MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) : 1281 MT_WF_IRPI_NSS_MT7916(idx, nss); 1282 1283 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1284 val = mt76_rr(dev, reg); 1285 sum += val * nf_power[i]; 1286 n += val; 1287 } 1288 } 1289 1290 if (!n) 1291 return 0; 1292 1293 return sum / n; 1294 } 1295 1296 void mt7915_update_channel(struct mt76_phy *mphy) 1297 { 1298 struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv; 1299 struct mt76_channel_state *state = mphy->chan_state; 1300 int nf; 1301 1302 mt7915_mcu_get_chan_mib_info(phy, false); 1303 1304 nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx); 1305 if (!phy->noise) 1306 phy->noise = nf << 4; 1307 else if (nf) 1308 phy->noise += nf - (phy->noise >> 4); 1309 1310 state->noise = -(phy->noise >> 4); 1311 } 1312 1313 static bool 1314 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state) 1315 { 1316 bool ret; 1317 1318 ret = wait_event_timeout(dev->reset_wait, 1319 (READ_ONCE(dev->recovery.state) & state), 1320 MT7915_RESET_TIMEOUT); 1321 1322 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); 1323 return ret; 1324 } 1325 1326 static void 1327 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) 1328 { 1329 struct ieee80211_hw *hw = priv; 1330 1331 switch (vif->type) { 1332 case NL80211_IFTYPE_MESH_POINT: 1333 case NL80211_IFTYPE_ADHOC: 1334 case NL80211_IFTYPE_AP: 1335 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon, 1336 BSS_CHANGED_BEACON_ENABLED); 1337 break; 1338 default: 1339 break; 1340 } 1341 } 1342 1343 static void 1344 mt7915_update_beacons(struct mt7915_dev *dev) 1345 { 1346 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1]; 1347 1348 ieee80211_iterate_active_interfaces(dev->mt76.hw, 1349 IEEE80211_IFACE_ITER_RESUME_ALL, 1350 mt7915_update_vif_beacon, dev->mt76.hw); 1351 1352 if (!mphy_ext) 1353 return; 1354 1355 ieee80211_iterate_active_interfaces(mphy_ext->hw, 1356 IEEE80211_IFACE_ITER_RESUME_ALL, 1357 mt7915_update_vif_beacon, mphy_ext->hw); 1358 } 1359 1360 void mt7915_tx_token_put(struct mt7915_dev *dev) 1361 { 1362 struct mt76_txwi_cache *txwi; 1363 int id; 1364 1365 spin_lock_bh(&dev->mt76.token_lock); 1366 idr_for_each_entry(&dev->mt76.token, txwi, id) { 1367 mt7915_txwi_free(dev, txwi, NULL, NULL); 1368 dev->mt76.token_count--; 1369 } 1370 spin_unlock_bh(&dev->mt76.token_lock); 1371 idr_destroy(&dev->mt76.token); 1372 } 1373 1374 static int 1375 mt7915_mac_restart(struct mt7915_dev *dev) 1376 { 1377 struct mt7915_phy *phy2; 1378 struct mt76_phy *ext_phy; 1379 struct mt76_dev *mdev = &dev->mt76; 1380 int i, ret; 1381 1382 ext_phy = dev->mt76.phys[MT_BAND1]; 1383 phy2 = ext_phy ? ext_phy->priv : NULL; 1384 1385 if (dev->hif2) { 1386 mt76_wr(dev, MT_INT1_MASK_CSR, 0x0); 1387 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); 1388 } 1389 1390 if (dev_is_pci(mdev->dev)) { 1391 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); 1392 if (dev->hif2) 1393 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0); 1394 } 1395 1396 set_bit(MT76_RESET, &dev->mphy.state); 1397 set_bit(MT76_MCU_RESET, &dev->mphy.state); 1398 wake_up(&dev->mt76.mcu.wait); 1399 if (ext_phy) { 1400 set_bit(MT76_RESET, &ext_phy->state); 1401 set_bit(MT76_MCU_RESET, &ext_phy->state); 1402 } 1403 1404 /* lock/unlock all queues to ensure that no tx is pending */ 1405 mt76_txq_schedule_all(&dev->mphy); 1406 if (ext_phy) 1407 mt76_txq_schedule_all(ext_phy); 1408 1409 /* disable all tx/rx napi */ 1410 mt76_worker_disable(&dev->mt76.tx_worker); 1411 mt76_for_each_q_rx(mdev, i) { 1412 if (mdev->q_rx[i].ndesc) 1413 napi_disable(&dev->mt76.napi[i]); 1414 } 1415 napi_disable(&dev->mt76.tx_napi); 1416 1417 /* token reinit */ 1418 mt7915_tx_token_put(dev); 1419 idr_init(&dev->mt76.token); 1420 1421 mt7915_dma_reset(dev, true); 1422 1423 local_bh_disable(); 1424 mt76_for_each_q_rx(mdev, i) { 1425 if (mdev->q_rx[i].ndesc) { 1426 napi_enable(&dev->mt76.napi[i]); 1427 napi_schedule(&dev->mt76.napi[i]); 1428 } 1429 } 1430 local_bh_enable(); 1431 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1432 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); 1433 1434 mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); 1435 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); 1436 1437 if (dev->hif2) { 1438 mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask); 1439 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0); 1440 } 1441 if (dev_is_pci(mdev->dev)) { 1442 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); 1443 if (dev->hif2) 1444 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); 1445 } 1446 1447 /* load firmware */ 1448 ret = mt7915_mcu_init_firmware(dev); 1449 if (ret) 1450 goto out; 1451 1452 /* set the necessary init items */ 1453 ret = mt7915_mcu_set_eeprom(dev); 1454 if (ret) 1455 goto out; 1456 1457 mt7915_mac_init(dev); 1458 mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband); 1459 mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband); 1460 ret = mt7915_txbf_init(dev); 1461 1462 if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) { 1463 ret = mt7915_run(dev->mphy.hw); 1464 if (ret) 1465 goto out; 1466 } 1467 1468 if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) { 1469 ret = mt7915_run(ext_phy->hw); 1470 if (ret) 1471 goto out; 1472 } 1473 1474 out: 1475 /* reset done */ 1476 clear_bit(MT76_RESET, &dev->mphy.state); 1477 if (phy2) 1478 clear_bit(MT76_RESET, &phy2->mt76->state); 1479 1480 local_bh_disable(); 1481 napi_enable(&dev->mt76.tx_napi); 1482 napi_schedule(&dev->mt76.tx_napi); 1483 local_bh_enable(); 1484 1485 mt76_worker_enable(&dev->mt76.tx_worker); 1486 1487 return ret; 1488 } 1489 1490 static void 1491 mt7915_mac_full_reset(struct mt7915_dev *dev) 1492 { 1493 struct mt76_phy *ext_phy; 1494 int i; 1495 1496 ext_phy = dev->mt76.phys[MT_BAND1]; 1497 1498 dev->recovery.hw_full_reset = true; 1499 1500 wake_up(&dev->mt76.mcu.wait); 1501 ieee80211_stop_queues(mt76_hw(dev)); 1502 if (ext_phy) 1503 ieee80211_stop_queues(ext_phy->hw); 1504 1505 cancel_delayed_work_sync(&dev->mphy.mac_work); 1506 if (ext_phy) 1507 cancel_delayed_work_sync(&ext_phy->mac_work); 1508 1509 mutex_lock(&dev->mt76.mutex); 1510 for (i = 0; i < 10; i++) { 1511 if (!mt7915_mac_restart(dev)) 1512 break; 1513 } 1514 mutex_unlock(&dev->mt76.mutex); 1515 1516 if (i == 10) 1517 dev_err(dev->mt76.dev, "chip full reset failed\n"); 1518 1519 ieee80211_restart_hw(mt76_hw(dev)); 1520 if (ext_phy) 1521 ieee80211_restart_hw(ext_phy->hw); 1522 1523 ieee80211_wake_queues(mt76_hw(dev)); 1524 if (ext_phy) 1525 ieee80211_wake_queues(ext_phy->hw); 1526 1527 dev->recovery.hw_full_reset = false; 1528 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 1529 MT7915_WATCHDOG_TIME); 1530 if (ext_phy) 1531 ieee80211_queue_delayed_work(ext_phy->hw, 1532 &ext_phy->mac_work, 1533 MT7915_WATCHDOG_TIME); 1534 } 1535 1536 /* system error recovery */ 1537 void mt7915_mac_reset_work(struct work_struct *work) 1538 { 1539 struct mt7915_phy *phy2; 1540 struct mt76_phy *ext_phy; 1541 struct mt7915_dev *dev; 1542 int i; 1543 1544 dev = container_of(work, struct mt7915_dev, reset_work); 1545 ext_phy = dev->mt76.phys[MT_BAND1]; 1546 phy2 = ext_phy ? ext_phy->priv : NULL; 1547 1548 /* chip full reset */ 1549 if (dev->recovery.restart) { 1550 /* disable WA/WM WDT */ 1551 mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA, 1552 MT_MCU_CMD_WDT_MASK); 1553 1554 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT) 1555 dev->recovery.wa_reset_count++; 1556 else 1557 dev->recovery.wm_reset_count++; 1558 1559 mt7915_mac_full_reset(dev); 1560 1561 /* enable mcu irq */ 1562 mt7915_irq_enable(dev, MT_INT_MCU_CMD); 1563 mt7915_irq_disable(dev, 0); 1564 1565 /* enable WA/WM WDT */ 1566 mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK); 1567 1568 dev->recovery.state = MT_MCU_CMD_NORMAL_STATE; 1569 dev->recovery.restart = false; 1570 return; 1571 } 1572 1573 /* chip partial reset */ 1574 if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) 1575 return; 1576 1577 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { 1578 mtk_wed_device_stop(&dev->mt76.mmio.wed); 1579 if (!is_mt7986(&dev->mt76)) 1580 mt76_wr(dev, MT_INT_WED_MASK_CSR, 0); 1581 } 1582 1583 ieee80211_stop_queues(mt76_hw(dev)); 1584 if (ext_phy) 1585 ieee80211_stop_queues(ext_phy->hw); 1586 1587 set_bit(MT76_RESET, &dev->mphy.state); 1588 set_bit(MT76_MCU_RESET, &dev->mphy.state); 1589 wake_up(&dev->mt76.mcu.wait); 1590 cancel_delayed_work_sync(&dev->mphy.mac_work); 1591 if (phy2) { 1592 set_bit(MT76_RESET, &phy2->mt76->state); 1593 cancel_delayed_work_sync(&phy2->mt76->mac_work); 1594 } 1595 mt76_worker_disable(&dev->mt76.tx_worker); 1596 mt76_for_each_q_rx(&dev->mt76, i) 1597 napi_disable(&dev->mt76.napi[i]); 1598 napi_disable(&dev->mt76.tx_napi); 1599 1600 mutex_lock(&dev->mt76.mutex); 1601 1602 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 1603 1604 if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 1605 mt7915_dma_reset(dev, false); 1606 1607 mt7915_tx_token_put(dev); 1608 idr_init(&dev->mt76.token); 1609 1610 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); 1611 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); 1612 } 1613 1614 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1615 clear_bit(MT76_RESET, &dev->mphy.state); 1616 if (phy2) 1617 clear_bit(MT76_RESET, &phy2->mt76->state); 1618 1619 local_bh_disable(); 1620 mt76_for_each_q_rx(&dev->mt76, i) { 1621 napi_enable(&dev->mt76.napi[i]); 1622 napi_schedule(&dev->mt76.napi[i]); 1623 } 1624 local_bh_enable(); 1625 1626 tasklet_schedule(&dev->mt76.irq_tasklet); 1627 1628 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); 1629 mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 1630 1631 mt76_worker_enable(&dev->mt76.tx_worker); 1632 1633 local_bh_disable(); 1634 napi_enable(&dev->mt76.tx_napi); 1635 napi_schedule(&dev->mt76.tx_napi); 1636 local_bh_enable(); 1637 1638 ieee80211_wake_queues(mt76_hw(dev)); 1639 if (ext_phy) 1640 ieee80211_wake_queues(ext_phy->hw); 1641 1642 mutex_unlock(&dev->mt76.mutex); 1643 1644 mt7915_update_beacons(dev); 1645 1646 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 1647 MT7915_WATCHDOG_TIME); 1648 if (phy2) 1649 ieee80211_queue_delayed_work(ext_phy->hw, 1650 &phy2->mt76->mac_work, 1651 MT7915_WATCHDOG_TIME); 1652 } 1653 1654 /* firmware coredump */ 1655 void mt7915_mac_dump_work(struct work_struct *work) 1656 { 1657 const struct mt7915_mem_region *mem_region; 1658 struct mt7915_crash_data *crash_data; 1659 struct mt7915_dev *dev; 1660 struct mt7915_mem_hdr *hdr; 1661 size_t buf_len; 1662 int i; 1663 u32 num; 1664 u8 *buf; 1665 1666 dev = container_of(work, struct mt7915_dev, dump_work); 1667 1668 mutex_lock(&dev->dump_mutex); 1669 1670 crash_data = mt7915_coredump_new(dev); 1671 if (!crash_data) { 1672 mutex_unlock(&dev->dump_mutex); 1673 goto skip_coredump; 1674 } 1675 1676 mem_region = mt7915_coredump_get_mem_layout(dev, &num); 1677 if (!mem_region || !crash_data->memdump_buf_len) { 1678 mutex_unlock(&dev->dump_mutex); 1679 goto skip_memdump; 1680 } 1681 1682 buf = crash_data->memdump_buf; 1683 buf_len = crash_data->memdump_buf_len; 1684 1685 /* dumping memory content... */ 1686 memset(buf, 0, buf_len); 1687 for (i = 0; i < num; i++) { 1688 if (mem_region->len > buf_len) { 1689 dev_warn(dev->mt76.dev, "%s len %lu is too large\n", 1690 mem_region->name, 1691 (unsigned long)mem_region->len); 1692 break; 1693 } 1694 1695 /* reserve space for the header */ 1696 hdr = (void *)buf; 1697 buf += sizeof(*hdr); 1698 buf_len -= sizeof(*hdr); 1699 1700 mt7915_memcpy_fromio(dev, buf, mem_region->start, 1701 mem_region->len); 1702 1703 hdr->start = mem_region->start; 1704 hdr->len = mem_region->len; 1705 1706 if (!mem_region->len) 1707 /* note: the header remains, just with zero length */ 1708 break; 1709 1710 buf += mem_region->len; 1711 buf_len -= mem_region->len; 1712 1713 mem_region++; 1714 } 1715 1716 mutex_unlock(&dev->dump_mutex); 1717 1718 skip_memdump: 1719 mt7915_coredump_submit(dev); 1720 skip_coredump: 1721 queue_work(dev->mt76.wq, &dev->reset_work); 1722 } 1723 1724 void mt7915_reset(struct mt7915_dev *dev) 1725 { 1726 if (!dev->recovery.hw_init_done) 1727 return; 1728 1729 if (dev->recovery.hw_full_reset) 1730 return; 1731 1732 /* wm/wa exception: do full recovery */ 1733 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) { 1734 dev->recovery.restart = true; 1735 dev_info(dev->mt76.dev, 1736 "%s indicated firmware crash, attempting recovery\n", 1737 wiphy_name(dev->mt76.hw->wiphy)); 1738 1739 mt7915_irq_disable(dev, MT_INT_MCU_CMD); 1740 queue_work(dev->mt76.wq, &dev->dump_work); 1741 return; 1742 } 1743 1744 queue_work(dev->mt76.wq, &dev->reset_work); 1745 wake_up(&dev->reset_wait); 1746 } 1747 1748 void mt7915_mac_update_stats(struct mt7915_phy *phy) 1749 { 1750 struct mt7915_dev *dev = phy->dev; 1751 struct mib_stats *mib = &phy->mib; 1752 int i, aggr0 = 0, aggr1, cnt; 1753 u8 band = phy->mt76->band_idx; 1754 u32 val; 1755 1756 cnt = mt76_rr(dev, MT_MIB_SDR3(band)); 1757 mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? 1758 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) : 1759 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt); 1760 1761 cnt = mt76_rr(dev, MT_MIB_SDR4(band)); 1762 mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt); 1763 1764 cnt = mt76_rr(dev, MT_MIB_SDR5(band)); 1765 mib->rx_mpdu_cnt += cnt; 1766 1767 cnt = mt76_rr(dev, MT_MIB_SDR6(band)); 1768 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt); 1769 1770 cnt = mt76_rr(dev, MT_MIB_SDR7(band)); 1771 mib->rx_vector_mismatch_cnt += 1772 FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt); 1773 1774 cnt = mt76_rr(dev, MT_MIB_SDR8(band)); 1775 mib->rx_delimiter_fail_cnt += 1776 FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt); 1777 1778 cnt = mt76_rr(dev, MT_MIB_SDR10(band)); 1779 mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ? 1780 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) : 1781 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt); 1782 1783 cnt = mt76_rr(dev, MT_MIB_SDR11(band)); 1784 mib->rx_len_mismatch_cnt += 1785 FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt); 1786 1787 cnt = mt76_rr(dev, MT_MIB_SDR12(band)); 1788 mib->tx_ampdu_cnt += cnt; 1789 1790 cnt = mt76_rr(dev, MT_MIB_SDR13(band)); 1791 mib->tx_stop_q_empty_cnt += 1792 FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt); 1793 1794 cnt = mt76_rr(dev, MT_MIB_SDR14(band)); 1795 mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ? 1796 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) : 1797 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt); 1798 1799 cnt = mt76_rr(dev, MT_MIB_SDR15(band)); 1800 mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ? 1801 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) : 1802 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt); 1803 1804 cnt = mt76_rr(dev, MT_MIB_SDR16(band)); 1805 mib->primary_cca_busy_time += 1806 FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt); 1807 1808 cnt = mt76_rr(dev, MT_MIB_SDR17(band)); 1809 mib->secondary_cca_busy_time += 1810 FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt); 1811 1812 cnt = mt76_rr(dev, MT_MIB_SDR18(band)); 1813 mib->primary_energy_detect_time += 1814 FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt); 1815 1816 cnt = mt76_rr(dev, MT_MIB_SDR19(band)); 1817 mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt); 1818 1819 cnt = mt76_rr(dev, MT_MIB_SDR20(band)); 1820 mib->ofdm_mdrdy_time += 1821 FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt); 1822 1823 cnt = mt76_rr(dev, MT_MIB_SDR21(band)); 1824 mib->green_mdrdy_time += 1825 FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt); 1826 1827 cnt = mt76_rr(dev, MT_MIB_SDR22(band)); 1828 mib->rx_ampdu_cnt += cnt; 1829 1830 cnt = mt76_rr(dev, MT_MIB_SDR23(band)); 1831 mib->rx_ampdu_bytes_cnt += cnt; 1832 1833 cnt = mt76_rr(dev, MT_MIB_SDR24(band)); 1834 mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ? 1835 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) : 1836 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt); 1837 1838 cnt = mt76_rr(dev, MT_MIB_SDR25(band)); 1839 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt; 1840 1841 cnt = mt76_rr(dev, MT_MIB_SDR27(band)); 1842 mib->tx_rwp_fail_cnt += 1843 FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt); 1844 1845 cnt = mt76_rr(dev, MT_MIB_SDR28(band)); 1846 mib->tx_rwp_need_cnt += 1847 FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt); 1848 1849 cnt = mt76_rr(dev, MT_MIB_SDR29(band)); 1850 mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ? 1851 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) : 1852 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt); 1853 1854 cnt = mt76_rr(dev, MT_MIB_SDRVEC(band)); 1855 mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ? 1856 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) : 1857 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt); 1858 1859 cnt = mt76_rr(dev, MT_MIB_SDR31(band)); 1860 mib->rx_ba_cnt += cnt; 1861 1862 cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band)); 1863 mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt); 1864 1865 cnt = mt76_rr(dev, MT_MIB_DR8(band)); 1866 mib->tx_mu_mpdu_cnt += cnt; 1867 1868 cnt = mt76_rr(dev, MT_MIB_DR9(band)); 1869 mib->tx_mu_acked_mpdu_cnt += cnt; 1870 1871 cnt = mt76_rr(dev, MT_MIB_DR11(band)); 1872 mib->tx_su_acked_mpdu_cnt += cnt; 1873 1874 cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band)); 1875 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt); 1876 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt); 1877 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt); 1878 1879 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { 1880 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); 1881 mib->tx_amsdu[i] += cnt; 1882 mib->tx_amsdu_cnt += cnt; 1883 } 1884 1885 if (is_mt7915(&dev->mt76)) { 1886 for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) { 1887 val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4))); 1888 mib->ba_miss_cnt += 1889 FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 1890 mib->ack_fail_cnt += 1891 FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); 1892 1893 val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4))); 1894 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 1895 mib->rts_retries_cnt += 1896 FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); 1897 1898 val = mt76_rr(dev, MT_TX_AGG_CNT(band, i)); 1899 phy->mt76->aggr_stats[aggr0++] += val & 0xffff; 1900 phy->mt76->aggr_stats[aggr0++] += val >> 16; 1901 1902 val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i)); 1903 phy->mt76->aggr_stats[aggr1++] += val & 0xffff; 1904 phy->mt76->aggr_stats[aggr1++] += val >> 16; 1905 } 1906 1907 cnt = mt76_rr(dev, MT_MIB_SDR32(band)); 1908 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1909 1910 cnt = mt76_rr(dev, MT_MIB_SDR33(band)); 1911 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt); 1912 1913 cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band)); 1914 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt); 1915 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt); 1916 1917 cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band)); 1918 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt); 1919 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt); 1920 1921 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band)); 1922 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt); 1923 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt); 1924 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt); 1925 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt); 1926 } else { 1927 for (i = 0; i < 2; i++) { 1928 /* rts count */ 1929 val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2))); 1930 mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val); 1931 mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val); 1932 1933 /* rts retry count */ 1934 val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2))); 1935 mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val); 1936 mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val); 1937 1938 /* ba miss count */ 1939 val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2))); 1940 mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val); 1941 mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val); 1942 1943 /* ack fail count */ 1944 val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2))); 1945 mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val); 1946 mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val); 1947 } 1948 1949 for (i = 0; i < 8; i++) { 1950 val = mt76_rr(dev, MT_TX_AGG_CNT(band, i)); 1951 phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val); 1952 phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val); 1953 } 1954 1955 cnt = mt76_rr(dev, MT_MIB_SDR32(band)); 1956 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt); 1957 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt); 1958 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1959 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt); 1960 1961 cnt = mt76_rr(dev, MT_MIB_BFCR7(band)); 1962 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt); 1963 1964 cnt = mt76_rr(dev, MT_MIB_BFCR2(band)); 1965 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt); 1966 1967 cnt = mt76_rr(dev, MT_MIB_BFCR0(band)); 1968 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt); 1969 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt); 1970 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt); 1971 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt); 1972 1973 cnt = mt76_rr(dev, MT_MIB_BFCR1(band)); 1974 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt); 1975 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt); 1976 } 1977 } 1978 1979 static void mt7915_mac_severe_check(struct mt7915_phy *phy) 1980 { 1981 struct mt7915_dev *dev = phy->dev; 1982 u32 trb; 1983 1984 if (!phy->omac_mask) 1985 return; 1986 1987 /* In rare cases, TRB pointers might be out of sync leads to RMAC 1988 * stopping Rx, so check status periodically to see if TRB hardware 1989 * requires minimal recovery. 1990 */ 1991 trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx)); 1992 1993 if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) != 1994 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) && 1995 (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) != 1996 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) && 1997 trb == phy->trb_ts) 1998 mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT, 1999 phy->mt76->band_idx); 2000 2001 phy->trb_ts = trb; 2002 } 2003 2004 void mt7915_mac_sta_rc_work(struct work_struct *work) 2005 { 2006 struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); 2007 struct ieee80211_sta *sta; 2008 struct ieee80211_vif *vif; 2009 struct mt7915_sta *msta; 2010 u32 changed; 2011 LIST_HEAD(list); 2012 2013 spin_lock_bh(&dev->sta_poll_lock); 2014 list_splice_init(&dev->sta_rc_list, &list); 2015 2016 while (!list_empty(&list)) { 2017 msta = list_first_entry(&list, struct mt7915_sta, rc_list); 2018 list_del_init(&msta->rc_list); 2019 changed = msta->changed; 2020 msta->changed = 0; 2021 spin_unlock_bh(&dev->sta_poll_lock); 2022 2023 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 2024 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 2025 2026 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 2027 IEEE80211_RC_NSS_CHANGED | 2028 IEEE80211_RC_BW_CHANGED)) 2029 mt7915_mcu_add_rate_ctrl(dev, vif, sta, true); 2030 2031 if (changed & IEEE80211_RC_SMPS_CHANGED) 2032 mt7915_mcu_add_smps(dev, vif, sta); 2033 2034 spin_lock_bh(&dev->sta_poll_lock); 2035 } 2036 2037 spin_unlock_bh(&dev->sta_poll_lock); 2038 } 2039 2040 void mt7915_mac_work(struct work_struct *work) 2041 { 2042 struct mt7915_phy *phy; 2043 struct mt76_phy *mphy; 2044 2045 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, 2046 mac_work.work); 2047 phy = mphy->priv; 2048 2049 mutex_lock(&mphy->dev->mutex); 2050 2051 mt76_update_survey(mphy); 2052 if (++mphy->mac_work_count == 5) { 2053 mphy->mac_work_count = 0; 2054 2055 mt7915_mac_update_stats(phy); 2056 mt7915_mac_severe_check(phy); 2057 } 2058 2059 mutex_unlock(&mphy->dev->mutex); 2060 2061 mt76_tx_status_check(mphy->dev, false); 2062 2063 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 2064 MT7915_WATCHDOG_TIME); 2065 } 2066 2067 static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy) 2068 { 2069 struct mt7915_dev *dev = phy->dev; 2070 2071 if (phy->rdd_state & BIT(0)) 2072 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0, 2073 MT_RX_SEL0, 0); 2074 if (phy->rdd_state & BIT(1)) 2075 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1, 2076 MT_RX_SEL0, 0); 2077 } 2078 2079 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain) 2080 { 2081 int err, region; 2082 2083 switch (dev->mt76.region) { 2084 case NL80211_DFS_ETSI: 2085 region = 0; 2086 break; 2087 case NL80211_DFS_JP: 2088 region = 2; 2089 break; 2090 case NL80211_DFS_FCC: 2091 default: 2092 region = 1; 2093 break; 2094 } 2095 2096 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain, 2097 MT_RX_SEL0, region); 2098 if (err < 0) 2099 return err; 2100 2101 if (is_mt7915(&dev->mt76)) { 2102 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain, 2103 0, dev->dbdc_support ? 2 : 0); 2104 if (err < 0) 2105 return err; 2106 } 2107 2108 return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain, 2109 MT_RX_SEL0, 1); 2110 } 2111 2112 static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy) 2113 { 2114 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 2115 struct mt7915_dev *dev = phy->dev; 2116 int err; 2117 2118 /* start CAC */ 2119 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, 2120 phy->mt76->band_idx, MT_RX_SEL0, 0); 2121 if (err < 0) 2122 return err; 2123 2124 err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx); 2125 if (err < 0) 2126 return err; 2127 2128 phy->rdd_state |= BIT(phy->mt76->band_idx); 2129 2130 if (!is_mt7915(&dev->mt76)) 2131 return 0; 2132 2133 if (chandef->width == NL80211_CHAN_WIDTH_160 || 2134 chandef->width == NL80211_CHAN_WIDTH_80P80) { 2135 err = mt7915_dfs_start_rdd(dev, 1); 2136 if (err < 0) 2137 return err; 2138 2139 phy->rdd_state |= BIT(1); 2140 } 2141 2142 return 0; 2143 } 2144 2145 static int 2146 mt7915_dfs_init_radar_specs(struct mt7915_phy *phy) 2147 { 2148 const struct mt7915_dfs_radar_spec *radar_specs; 2149 struct mt7915_dev *dev = phy->dev; 2150 int err, i; 2151 2152 switch (dev->mt76.region) { 2153 case NL80211_DFS_FCC: 2154 radar_specs = &fcc_radar_specs; 2155 err = mt7915_mcu_set_fcc5_lpn(dev, 8); 2156 if (err < 0) 2157 return err; 2158 break; 2159 case NL80211_DFS_ETSI: 2160 radar_specs = &etsi_radar_specs; 2161 break; 2162 case NL80211_DFS_JP: 2163 radar_specs = &jp_radar_specs; 2164 break; 2165 default: 2166 return -EINVAL; 2167 } 2168 2169 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) { 2170 err = mt7915_mcu_set_radar_th(dev, i, 2171 &radar_specs->radar_pattern[i]); 2172 if (err < 0) 2173 return err; 2174 } 2175 2176 return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th); 2177 } 2178 2179 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) 2180 { 2181 struct mt7915_dev *dev = phy->dev; 2182 enum mt76_dfs_state dfs_state, prev_state; 2183 int err; 2184 2185 prev_state = phy->mt76->dfs_state; 2186 dfs_state = mt76_phy_dfs_state(phy->mt76); 2187 2188 if (prev_state == dfs_state) 2189 return 0; 2190 2191 if (prev_state == MT_DFS_STATE_UNKNOWN) 2192 mt7915_dfs_stop_radar_detector(phy); 2193 2194 if (dfs_state == MT_DFS_STATE_DISABLED) 2195 goto stop; 2196 2197 if (prev_state <= MT_DFS_STATE_DISABLED) { 2198 err = mt7915_dfs_init_radar_specs(phy); 2199 if (err < 0) 2200 return err; 2201 2202 err = mt7915_dfs_start_radar_detector(phy); 2203 if (err < 0) 2204 return err; 2205 2206 phy->mt76->dfs_state = MT_DFS_STATE_CAC; 2207 } 2208 2209 if (dfs_state == MT_DFS_STATE_CAC) 2210 return 0; 2211 2212 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, 2213 phy->mt76->band_idx, MT_RX_SEL0, 0); 2214 if (err < 0) { 2215 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; 2216 return err; 2217 } 2218 2219 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE; 2220 return 0; 2221 2222 stop: 2223 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, 2224 phy->mt76->band_idx, MT_RX_SEL0, 0); 2225 if (err < 0) 2226 return err; 2227 2228 if (is_mt7915(&dev->mt76)) { 2229 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, 2230 phy->mt76->band_idx, 0, 2231 dev->dbdc_support ? 2 : 0); 2232 if (err < 0) 2233 return err; 2234 } 2235 2236 mt7915_dfs_stop_radar_detector(phy); 2237 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED; 2238 2239 return 0; 2240 } 2241 2242 static int 2243 mt7915_mac_twt_duration_align(int duration) 2244 { 2245 return duration << 8; 2246 } 2247 2248 static u64 2249 mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev, 2250 struct mt7915_twt_flow *flow) 2251 { 2252 struct mt7915_twt_flow *iter, *iter_next; 2253 u32 duration = flow->duration << 8; 2254 u64 start_tsf; 2255 2256 iter = list_first_entry_or_null(&dev->twt_list, 2257 struct mt7915_twt_flow, list); 2258 if (!iter || !iter->sched || iter->start_tsf > duration) { 2259 /* add flow as first entry in the list */ 2260 list_add(&flow->list, &dev->twt_list); 2261 return 0; 2262 } 2263 2264 list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) { 2265 start_tsf = iter->start_tsf + 2266 mt7915_mac_twt_duration_align(iter->duration); 2267 if (list_is_last(&iter->list, &dev->twt_list)) 2268 break; 2269 2270 if (!iter_next->sched || 2271 iter_next->start_tsf > start_tsf + duration) { 2272 list_add(&flow->list, &iter->list); 2273 goto out; 2274 } 2275 } 2276 2277 /* add flow as last entry in the list */ 2278 list_add_tail(&flow->list, &dev->twt_list); 2279 out: 2280 return start_tsf; 2281 } 2282 2283 static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt) 2284 { 2285 struct ieee80211_twt_params *twt_agrt; 2286 u64 interval, duration; 2287 u16 mantissa; 2288 u8 exp; 2289 2290 /* only individual agreement supported */ 2291 if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) 2292 return -EOPNOTSUPP; 2293 2294 /* only 256us unit supported */ 2295 if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) 2296 return -EOPNOTSUPP; 2297 2298 twt_agrt = (struct ieee80211_twt_params *)twt->params; 2299 2300 /* explicit agreement not supported */ 2301 if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT))) 2302 return -EOPNOTSUPP; 2303 2304 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, 2305 le16_to_cpu(twt_agrt->req_type)); 2306 mantissa = le16_to_cpu(twt_agrt->mantissa); 2307 duration = twt_agrt->min_twt_dur << 8; 2308 2309 interval = (u64)mantissa << exp; 2310 if (interval < duration) 2311 return -EOPNOTSUPP; 2312 2313 return 0; 2314 } 2315 2316 static bool 2317 mt7915_mac_twt_param_equal(struct mt7915_sta *msta, 2318 struct ieee80211_twt_params *twt_agrt) 2319 { 2320 u16 type = le16_to_cpu(twt_agrt->req_type); 2321 u8 exp; 2322 int i; 2323 2324 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type); 2325 for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) { 2326 struct mt7915_twt_flow *f; 2327 2328 if (!(msta->twt.flowid_mask & BIT(i))) 2329 continue; 2330 2331 f = &msta->twt.flow[i]; 2332 if (f->duration == twt_agrt->min_twt_dur && 2333 f->mantissa == twt_agrt->mantissa && 2334 f->exp == exp && 2335 f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) && 2336 f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) && 2337 f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER)) 2338 return true; 2339 } 2340 2341 return false; 2342 } 2343 2344 void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, 2345 struct ieee80211_sta *sta, 2346 struct ieee80211_twt_setup *twt) 2347 { 2348 enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT; 2349 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 2350 struct ieee80211_twt_params *twt_agrt = (void *)twt->params; 2351 u16 req_type = le16_to_cpu(twt_agrt->req_type); 2352 enum ieee80211_twt_setup_cmd sta_setup_cmd; 2353 struct mt7915_dev *dev = mt7915_hw_dev(hw); 2354 struct mt7915_twt_flow *flow; 2355 int flowid, table_id; 2356 u8 exp; 2357 2358 if (mt7915_mac_check_twt_req(twt)) 2359 goto out; 2360 2361 mutex_lock(&dev->mt76.mutex); 2362 2363 if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT) 2364 goto unlock; 2365 2366 if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow)) 2367 goto unlock; 2368 2369 if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) { 2370 setup_cmd = TWT_SETUP_CMD_DICTATE; 2371 twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR; 2372 goto unlock; 2373 } 2374 2375 flowid = ffs(~msta->twt.flowid_mask) - 1; 2376 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID); 2377 twt_agrt->req_type |= le16_encode_bits(flowid, 2378 IEEE80211_TWT_REQTYPE_FLOWID); 2379 2380 table_id = ffs(~dev->twt.table_mask) - 1; 2381 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type); 2382 sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type); 2383 2384 if (mt7915_mac_twt_param_equal(msta, twt_agrt)) 2385 goto unlock; 2386 2387 flow = &msta->twt.flow[flowid]; 2388 memset(flow, 0, sizeof(*flow)); 2389 INIT_LIST_HEAD(&flow->list); 2390 flow->wcid = msta->wcid.idx; 2391 flow->table_id = table_id; 2392 flow->id = flowid; 2393 flow->duration = twt_agrt->min_twt_dur; 2394 flow->mantissa = twt_agrt->mantissa; 2395 flow->exp = exp; 2396 flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION); 2397 flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE); 2398 flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER); 2399 2400 if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST || 2401 sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) { 2402 u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp; 2403 u64 flow_tsf, curr_tsf; 2404 u32 rem; 2405 2406 flow->sched = true; 2407 flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow); 2408 curr_tsf = __mt7915_get_tsf(hw, msta->vif); 2409 div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem); 2410 flow_tsf = curr_tsf + interval - rem; 2411 twt_agrt->twt = cpu_to_le64(flow_tsf); 2412 } else { 2413 list_add_tail(&flow->list, &dev->twt_list); 2414 } 2415 flow->tsf = le64_to_cpu(twt_agrt->twt); 2416 2417 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD)) 2418 goto unlock; 2419 2420 setup_cmd = TWT_SETUP_CMD_ACCEPT; 2421 dev->twt.table_mask |= BIT(table_id); 2422 msta->twt.flowid_mask |= BIT(flowid); 2423 dev->twt.n_agrt++; 2424 2425 unlock: 2426 mutex_unlock(&dev->mt76.mutex); 2427 out: 2428 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD); 2429 twt_agrt->req_type |= 2430 le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD); 2431 twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) | 2432 (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED); 2433 } 2434 2435 void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev, 2436 struct mt7915_sta *msta, 2437 u8 flowid) 2438 { 2439 struct mt7915_twt_flow *flow; 2440 2441 lockdep_assert_held(&dev->mt76.mutex); 2442 2443 if (flowid >= ARRAY_SIZE(msta->twt.flow)) 2444 return; 2445 2446 if (!(msta->twt.flowid_mask & BIT(flowid))) 2447 return; 2448 2449 flow = &msta->twt.flow[flowid]; 2450 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, 2451 MCU_TWT_AGRT_DELETE)) 2452 return; 2453 2454 list_del_init(&flow->list); 2455 msta->twt.flowid_mask &= ~BIT(flowid); 2456 dev->twt.table_mask &= ~BIT(flow->table_id); 2457 dev->twt.n_agrt--; 2458 } 2459