1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (C) 2022 MediaTek Inc. 4 */ 5 6 #include <linux/etherdevice.h> 7 #include <linux/timekeeping.h> 8 #include "coredump.h" 9 #include "mt7996.h" 10 #include "../dma.h" 11 #include "mac.h" 12 #include "mcu.h" 13 14 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) 15 16 #define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) 17 #define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ 18 IEEE80211_RADIOTAP_HE_##f) 19 20 static const struct mt7996_dfs_radar_spec etsi_radar_specs = { 21 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 22 .radar_pattern = { 23 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 24 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 25 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 26 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 27 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 28 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 29 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 30 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 31 }, 32 }; 33 34 static const struct mt7996_dfs_radar_spec fcc_radar_specs = { 35 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 36 .radar_pattern = { 37 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 38 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 39 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 40 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 41 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 42 }, 43 }; 44 45 static const struct mt7996_dfs_radar_spec jp_radar_specs = { 46 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 47 .radar_pattern = { 48 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 49 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 50 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 51 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 52 [4] = { 1, 0, 9, 255, 28, 0, 323, 
343, 13, 1, 32 }, 53 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 54 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 55 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, 56 }, 57 }; 58 59 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev, 60 u16 idx, bool unicast) 61 { 62 struct mt7996_sta *sta; 63 struct mt76_wcid *wcid; 64 65 if (idx >= ARRAY_SIZE(dev->mt76.wcid)) 66 return NULL; 67 68 wcid = rcu_dereference(dev->mt76.wcid[idx]); 69 if (unicast || !wcid) 70 return wcid; 71 72 if (!wcid->sta) 73 return NULL; 74 75 sta = container_of(wcid, struct mt7996_sta, wcid); 76 if (!sta->vif) 77 return NULL; 78 79 return &sta->vif->sta.wcid; 80 } 81 82 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask) 83 { 84 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 85 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 86 87 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 88 0, 5000); 89 } 90 91 u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw) 92 { 93 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 94 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 95 96 return MT_WTBL_LMAC_OFFS(wcid, dw); 97 } 98 99 static void mt7996_mac_sta_poll(struct mt7996_dev *dev) 100 { 101 static const u8 ac_to_tid[] = { 102 [IEEE80211_AC_BE] = 0, 103 [IEEE80211_AC_BK] = 1, 104 [IEEE80211_AC_VI] = 4, 105 [IEEE80211_AC_VO] = 6 106 }; 107 struct ieee80211_sta *sta; 108 struct mt7996_sta *msta; 109 struct rate_info *rate; 110 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 111 LIST_HEAD(sta_poll_list); 112 int i; 113 114 spin_lock_bh(&dev->sta_poll_lock); 115 list_splice_init(&dev->sta_poll_list, &sta_poll_list); 116 spin_unlock_bh(&dev->sta_poll_lock); 117 118 rcu_read_lock(); 119 120 while (true) { 121 bool clear = false; 122 u32 addr, val; 123 u16 idx; 124 s8 rssi[4]; 125 u8 bw; 126 127 spin_lock_bh(&dev->sta_poll_lock); 128 if (list_empty(&sta_poll_list)) { 129 spin_unlock_bh(&dev->sta_poll_lock); 130 
break; 131 } 132 msta = list_first_entry(&sta_poll_list, 133 struct mt7996_sta, poll_list); 134 list_del_init(&msta->poll_list); 135 spin_unlock_bh(&dev->sta_poll_lock); 136 137 idx = msta->wcid.idx; 138 139 /* refresh peer's airtime reporting */ 140 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20); 141 142 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 143 u32 tx_last = msta->airtime_ac[i]; 144 u32 rx_last = msta->airtime_ac[i + 4]; 145 146 msta->airtime_ac[i] = mt76_rr(dev, addr); 147 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 148 149 tx_time[i] = msta->airtime_ac[i] - tx_last; 150 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 151 152 if ((tx_last | rx_last) & BIT(30)) 153 clear = true; 154 155 addr += 8; 156 } 157 158 if (clear) { 159 mt7996_mac_wtbl_update(dev, idx, 160 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 161 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 162 } 163 164 if (!msta->wcid.sta) 165 continue; 166 167 sta = container_of((void *)msta, struct ieee80211_sta, 168 drv_priv); 169 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 170 u8 q = mt76_connac_lmac_mapping(i); 171 u32 tx_cur = tx_time[q]; 172 u32 rx_cur = rx_time[q]; 173 u8 tid = ac_to_tid[i]; 174 175 if (!tx_cur && !rx_cur) 176 continue; 177 178 ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur); 179 } 180 181 /* We don't support reading GI info from txs packets. 182 * For accurate tx status reporting and AQL improvement, 183 * we need to make sure that flags match so polling GI 184 * from per-sta counters directly. 
185 */ 186 rate = &msta->wcid.rate; 187 188 switch (rate->bw) { 189 case RATE_INFO_BW_320: 190 bw = IEEE80211_STA_RX_BW_320; 191 break; 192 case RATE_INFO_BW_160: 193 bw = IEEE80211_STA_RX_BW_160; 194 break; 195 case RATE_INFO_BW_80: 196 bw = IEEE80211_STA_RX_BW_80; 197 break; 198 case RATE_INFO_BW_40: 199 bw = IEEE80211_STA_RX_BW_40; 200 break; 201 default: 202 bw = IEEE80211_STA_RX_BW_20; 203 break; 204 } 205 206 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6); 207 val = mt76_rr(dev, addr); 208 if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) { 209 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5); 210 val = mt76_rr(dev, addr); 211 rate->eht_gi = FIELD_GET(GENMASK(25, 24), val); 212 } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { 213 u8 offs = 24 + 2 * bw; 214 215 rate->he_gi = (val & (0x3 << offs)) >> offs; 216 } else if (rate->flags & 217 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { 218 if (val & BIT(12 + bw)) 219 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 220 else 221 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; 222 } 223 224 /* get signal strength of resp frames (CTS/BA/ACK) */ 225 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34); 226 val = mt76_rr(dev, addr); 227 228 rssi[0] = to_rssi(GENMASK(7, 0), val); 229 rssi[1] = to_rssi(GENMASK(15, 8), val); 230 rssi[2] = to_rssi(GENMASK(23, 16), val); 231 rssi[3] = to_rssi(GENMASK(31, 14), val); 232 233 msta->ack_signal = 234 mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); 235 236 ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); 237 } 238 239 rcu_read_unlock(); 240 } 241 242 void mt7996_mac_enable_rtscts(struct mt7996_dev *dev, 243 struct ieee80211_vif *vif, bool enable) 244 { 245 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; 246 u32 addr; 247 248 addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5); 249 if (enable) 250 mt76_set(dev, addr, BIT(5)); 251 else 252 mt76_clear(dev, addr, BIT(5)); 253 } 254 255 void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev, 256 u8 
tbl_idx, u16 rate_idx) 257 { 258 u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx; 259 260 mt76_wr(dev, MT_WTBL_ITDR0, rate_idx); 261 /* use wtbl spe idx */ 262 mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL); 263 mt76_wr(dev, MT_WTBL_ITCR, ctrl); 264 } 265 266 static void 267 mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, 268 struct ieee80211_radiotap_he *he, 269 __le32 *rxv) 270 { 271 u32 ru, offs = 0; 272 273 ru = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC); 274 275 status->bw = RATE_INFO_BW_HE_RU; 276 277 switch (ru) { 278 case 0 ... 36: 279 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; 280 offs = ru; 281 break; 282 case 37 ... 52: 283 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; 284 offs = ru - 37; 285 break; 286 case 53 ... 60: 287 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; 288 offs = ru - 53; 289 break; 290 case 61 ... 64: 291 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; 292 offs = ru - 61; 293 break; 294 case 65 ... 66: 295 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; 296 offs = ru - 65; 297 break; 298 case 67: 299 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; 300 break; 301 case 68: 302 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; 303 break; 304 } 305 306 he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); 307 he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) | 308 le16_encode_bits(offs, 309 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); 310 } 311 312 static void 313 mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) 314 { 315 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 316 static const struct ieee80211_radiotap_he_mu mu_known = { 317 .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | 318 HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | 319 HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) | 320 HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN), 321 .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), 322 }; 323 struct ieee80211_radiotap_he_mu *he_mu = NULL; 324 325 status->flag |= RX_FLAG_RADIOTAP_HE_MU; 326 
327 he_mu = skb_push(skb, sizeof(mu_known)); 328 memcpy(he_mu, &mu_known, sizeof(mu_known)); 329 330 #define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f) 331 332 he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx); 333 if (status->he_dcm) 334 he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm); 335 336 he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) | 337 MU_PREP(FLAGS2_SIG_B_SYMS_USERS, 338 le32_get_bits(rxv[4], MT_CRXV_HE_NUM_USER)); 339 340 he_mu->ru_ch1[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU0) & 0xff; 341 342 if (status->bw >= RATE_INFO_BW_40) { 343 he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN); 344 he_mu->ru_ch2[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU1) & 0xff; 345 } 346 347 if (status->bw >= RATE_INFO_BW_80) { 348 u32 ru_h, ru_l; 349 350 he_mu->ru_ch1[1] = le32_get_bits(rxv[16], MT_CRXV_HE_RU2) & 0xff; 351 352 ru_l = le32_get_bits(rxv[16], MT_CRXV_HE_RU3_L); 353 ru_h = le32_get_bits(rxv[17], MT_CRXV_HE_RU3_H) & 0x7; 354 he_mu->ru_ch2[1] = (u8)(ru_l | ru_h << 4); 355 } 356 } 357 358 static void 359 mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode) 360 { 361 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 362 static const struct ieee80211_radiotap_he known = { 363 .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | 364 HE_BITS(DATA1_DATA_DCM_KNOWN) | 365 HE_BITS(DATA1_STBC_KNOWN) | 366 HE_BITS(DATA1_CODING_KNOWN) | 367 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | 368 HE_BITS(DATA1_DOPPLER_KNOWN) | 369 HE_BITS(DATA1_SPTL_REUSE_KNOWN) | 370 HE_BITS(DATA1_BSS_COLOR_KNOWN), 371 .data2 = HE_BITS(DATA2_GI_KNOWN) | 372 HE_BITS(DATA2_TXBF_KNOWN) | 373 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) | 374 HE_BITS(DATA2_TXOP_KNOWN), 375 }; 376 struct ieee80211_radiotap_he *he = NULL; 377 u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1; 378 379 status->flag |= RX_FLAG_RADIOTAP_HE; 380 381 he = skb_push(skb, sizeof(known)); 382 memcpy(he, &known, sizeof(known)); 383 384 he->data3 = 
HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[9]) | 385 HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[4]); 386 he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[13]); 387 he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[5]) | 388 le16_encode_bits(ltf_size, 389 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); 390 if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) 391 he->data5 |= HE_BITS(DATA5_TXBF); 392 he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[9]) | 393 HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[9]); 394 395 switch (mode) { 396 case MT_PHY_TYPE_HE_SU: 397 he->data1 |= HE_BITS(DATA1_FORMAT_SU) | 398 HE_BITS(DATA1_UL_DL_KNOWN) | 399 HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | 400 HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); 401 402 he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[8]) | 403 HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); 404 break; 405 case MT_PHY_TYPE_HE_EXT_SU: 406 he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | 407 HE_BITS(DATA1_UL_DL_KNOWN) | 408 HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); 409 410 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); 411 break; 412 case MT_PHY_TYPE_HE_MU: 413 he->data1 |= HE_BITS(DATA1_FORMAT_MU) | 414 HE_BITS(DATA1_UL_DL_KNOWN); 415 416 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); 417 he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[8]); 418 419 mt7996_mac_decode_he_radiotap_ru(status, he, rxv); 420 mt7996_mac_decode_he_mu_radiotap(skb, rxv); 421 break; 422 case MT_PHY_TYPE_HE_TB: 423 he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | 424 HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | 425 HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | 426 HE_BITS(DATA1_SPTL_REUSE4_KNOWN); 427 428 he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[13]) | 429 HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[13]) | 430 HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[13]) | 431 HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[13]); 432 433 mt7996_mac_decode_he_radiotap_ru(status, he, rxv); 434 break; 435 default: 436 break; 437 } 438 } 439 440 /* The HW does not translate the mac header to 802.3 for mesh point */ 441 
/* Rebuild the original 802.11 header in front of a frame whose header the
 * hardware already translated to 802.3 (used for fragmented rx on mesh).
 * Returns 0 on success or -EINVAL when the needed RXD info is missing.
 */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	/* RXD group 4 carries the original frame control / seq ctrl */
	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	/* addr3/addr4 depend on the To-DS/From-DS combination */
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	/* strip the 802.3 header, then re-insert an LLC/SNAP header where
	 * the original frame would have carried one
	 */
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	/* 3-address headers are 6 bytes shorter than sizeof(hdr) */
	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

/* Decode the P-RXV rate words into rx status (encoding, MCS index, NSS,
 * GI, DCM, bandwidth). Returns 0 on success, -EINVAL on a malformed rate.
 */
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		/* map hw rate index to the sband's legacy rate index */
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		/* HE extended-range SU over a 106-tone RU reports as 40 MHz */
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	case IEEE80211_STA_RX_BW_320:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

/* Parse a received frame's RX descriptor: walk the optional RXD groups,
 * fill the mt76_rx_status in skb->cb, strip/rebuild headers as needed
 * and prepend radiotap HE info. Returns 0 or a negative error (frame
 * should be dropped).
 */
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7996_sta *msta;

		/* queue the station for airtime/rate polling */
		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* NOTE(review): rxd1 is tested against an MT_RXD3_* mask here —
	 * verify against the RXD register layout that FCS_ERR really
	 * lives in RXD1 at this bit position.
	 */
	if (rxd1 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the fixed 8-dword RXD, then the optional groups in order */
	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		/* original 802.11 frame control / qos / seq before translation */
		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				/* PN bytes arrive in reverse order */
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* ampdu_ref must never be 0: skip it on wrap */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		/* fragments must be delivered with an 802.11 header */
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR) &&
			   get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 16;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so here needs to clear amsdu present bit
			 * to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt7996_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

/* Fill the TXD words that depend on the frame format for an 802.3
 * (hardware-encapsulated) frame.
 */
static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	/* synthesize the frame type/subtype the hw will generate */
	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);
}

/* Fill the TXD words that depend on the frame format for a frame that
 * still carries its 802.11 header.
 */
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* ADDBA requests and other mgmt frames use dedicated hw queues */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	/* BIP-protected group mgmt frames: hw adds MMIE, no per-frame PN */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		/* injected frames keep their own sequence number */
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

/* Build the full TX descriptor (TXD) for @skb: queue selection, wcid/omac
 * binding, protection/ack policy, packet id for tx status, and the
 * format-specific words via the 8023/80211 helpers. A fixed-rate index is
 * applied for mgmt/mcast/minrate frames.
 */
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt7996_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->mt76.omac_idx;
		wmm_idx = mvif->mt76.wmm_idx;
		band_idx = mvif->mt76.band_idx;
	}

	/* beacons and inband discovery go out via firmware queues */
	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
	      FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

/* mt76 tx path hook: reserve a token and txwi cache entry, request tx
 * status reporting, write the TXD and fill the firmware buffer list.
 * (Definition continues past the end of this chunk.)
 */
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

		/* rate-limit tx status requests to 4 per second per sta */
		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i
+ 1].addr);
		txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

/* Kick off an A-MPDU BA session for QoS-data frames sent to HT/HE capable
 * peers, once per TID (tracked in msta->ampdu_state).  VO TIDs are skipped.
 */
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7996_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	/* reassemble the frame-control type/subtype from the TXD fields */
	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

/* Release one TX descriptor cache entry: unmap its DMA buffers, complete the
 * skb towards mac80211 (collecting it on @free_list when given) and return
 * the txwi to the pool.  @sta may be NULL; the wcid index is then recovered
 * from the descriptor itself.
 */
static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		/* EAPOL frames must not trigger BA session setup */
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, txwi);
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

/* Parse a TXRX_NOTIFY (tx-free) event from the firmware and release all the
 * DMA tokens / MSDUs it reports.  Only event version >= 4 is handled.
 */
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			/* wcid-pair entry: update @sta for the msdu entries
			 * that follow, and queue the station for polling
			 */
			struct mt7996_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER)
			continue;

		/* each info word can carry up to two 15-bit msdu tokens;
		 * an all-ones token means "no entry"
		 */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

/* Process one TXS (tx status) record for the pending status skb matching
 * @pid on @wcid: set the ACK flag, decode the transmit rate/bandwidth into
 * wcid->rate and update @stats counters.
 *
 * Returns true when a matching status skb was found.
 */
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
		       __le32 *txs_data, struct mt76_sta_stats *stats)
{
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* halve the reported NSS for STBC rates */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

/* Handle one raw TXS record: validate its format/pid/wcid, dispatch it to
 * mt7996_mac_add_txs_skb() and queue the station for polling.  Runs under
 * RCU to keep the wcid entry alive.
 */
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	/* only TXS formats 0 and 1 are handled here */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}
/* mt7996_rx_check - early RX classifier used on the direct (non-skb) RX
 * path.  Consumes TXRX_NOTIFY / TXS / FW_MONITOR packets in place and
 * returns true when the packet still needs normal RX processing.
 */
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* software-tagged frames are treated as normal RX */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		/* TXS payload: 4-word header, then 8-word records */
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

/* mt7996_queue_rx_skb - mt76 RX dispatch hook: route a received buffer by
 * its RXD packet type (tx-free events, MCU events, TX status, firmware
 * monitor data or a normal frame handed to mac80211 via mt76_rx()).
 */
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

/* Reset the per-band CCA statistics counters. */
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	/* NOTE(review): BIT(11) | BIT(9) are undocumented magic bits of
	 * RX_CTRL1 — presumably counter clear/enable; confirm against the
	 * hardware register manual
	 */
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

/* Clear the per-band software and hardware statistics counters and restart
 * the survey time base.
 */
void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	/* the aggregation counters are clear-on-read */
	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

/* mt7996_mac_set_timing - program slot/SIFS/EIFS and CCA timing registers
 * for this band, widened by the largest coverage class configured on any
 * active phy of the device.
 */
void mt7996_mac_set_timing(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* use the maximum coverage class across all active phys */
	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);
	/* disable TX/RX while the timing registers are being updated */
	mt76_set(dev, MT_ARB_SCR(band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, 10) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (!a_band)
		mt76_wr(dev, MT_TMAC_ICR1(band_idx),
			FIELD_PREP(MT_IFS_EIFS_CCK, 314));

	/* short slot or 5/6 GHz: use the OFDM CF-End rate, else 11b */
	if (phy->slottime < 20 || a_band)
		val = MT7996_CFEND_RATE_DEFAULT;
	else
		val = MT7996_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_RATE_HRCR0(band_idx), MT_RATE_HRCR0_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

/* Enable noise-floor (IRPI/IPI) measurement for @band. */
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

/* Compute the weighted average noise-floor magnitude (in dBm, positive
 * value) from the per-antenna IRPI histogram counters; returns 0 when no
 * samples were collected.
 */
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	/* dBm magnitude associated with each IRPI histogram bin */
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

/* mt76 .update_survey hook: refresh channel MIB info and track the noise
 * floor as an exponential moving average (phy->noise is stored <<4).
 */
void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

/* Wait (with timeout) until the MCU recovery state machine reports @state;
 * returns false and WARNs on timeout.
 */
static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

/* Interface iterator callback: re-install the beacon template on
 * beacon-capable interface types after a reset.
 */
static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

/* Re-install beacon templates on all active interfaces of every phy. */
static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

/* Release every outstanding TX DMA token and its descriptor; used on
 * reset paths before the token IDR is re-initialized.
 */
void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

/* mt7996_mac_restart - full chip restart: quiesce interrupts, DMA, NAPI and
 * the TX worker, reset DMA, reload firmware/EEPROM and bring previously
 * running phys back up.  Called with dev->mt76.mutex held (from
 * mt7996_mac_full_reset()).  Returns 0 on success.
 */
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* mask and ack interrupts on both PCIe interfaces */
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		set_bit(MT76_MCU_RESET, &phy2->mt76->state);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		set_bit(MT76_MCU_RESET, &phy3->mt76->state);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	/* restore and ack interrupts */
	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
	mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
	mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
	ret = mt7996_txbf_init(dev);

	/* restart the phys that were running before the reset */
	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

/* mt7996_mac_full_reset - full chip recovery after a firmware watchdog:
 * stop all phys, retry mt7996_mac_restart() up to 10 times and restart
 * mac80211 on every phy.
 */
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 =
mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	/* retry the restart a bounded number of times */
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

/* mt7996_mac_reset_work - system error recovery worker.  Performs either a
 * full chip reset (firmware watchdog path, dev->recovery.restart) or the
 * lighter L1 SER recovery handshake with the MCU (DMA stop -> reset ->
 * re-init -> resume).
 */
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* tell the MCU that host-side DMA is stopped, then wait for it */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf
= crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	/* hand over to the reset worker after the dump is captured */
	queue_work(dev->mt76.wq, &dev->reset_work);
}

/* mt7996_reset - entry point for error recovery.  Firmware (WM/WA) watchdog
 * events trigger a coredump followed by a full restart; any other recovery
 * state is handled by the reset worker directly.
 */
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

/* mt7996_mac_update_stats - accumulate this band's hardware MIB counters
 * into the software mib_stats.  The MIB registers read here are
 * clear-on-read, so each read adds the delta since the previous poll.
 */
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	/* beamforming feedback counters also feed the "all" aggregate */
	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++)
{ 2328 cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i)); 2329 phy->mt76->aggr_stats[i] += cnt; 2330 } 2331 } 2332 2333 void mt7996_mac_sta_rc_work(struct work_struct *work) 2334 { 2335 struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work); 2336 struct ieee80211_sta *sta; 2337 struct ieee80211_vif *vif; 2338 struct mt7996_sta *msta; 2339 u32 changed; 2340 LIST_HEAD(list); 2341 2342 spin_lock_bh(&dev->sta_poll_lock); 2343 list_splice_init(&dev->sta_rc_list, &list); 2344 2345 while (!list_empty(&list)) { 2346 msta = list_first_entry(&list, struct mt7996_sta, rc_list); 2347 list_del_init(&msta->rc_list); 2348 changed = msta->changed; 2349 msta->changed = 0; 2350 spin_unlock_bh(&dev->sta_poll_lock); 2351 2352 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 2353 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 2354 2355 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 2356 IEEE80211_RC_NSS_CHANGED | 2357 IEEE80211_RC_BW_CHANGED)) 2358 mt7996_mcu_add_rate_ctrl(dev, vif, sta, true); 2359 2360 /* TODO: smps change */ 2361 2362 spin_lock_bh(&dev->sta_poll_lock); 2363 } 2364 2365 spin_unlock_bh(&dev->sta_poll_lock); 2366 } 2367 2368 void mt7996_mac_work(struct work_struct *work) 2369 { 2370 struct mt7996_phy *phy; 2371 struct mt76_phy *mphy; 2372 2373 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, 2374 mac_work.work); 2375 phy = mphy->priv; 2376 2377 mutex_lock(&mphy->dev->mutex); 2378 2379 mt76_update_survey(mphy); 2380 if (++mphy->mac_work_count == 5) { 2381 mphy->mac_work_count = 0; 2382 2383 mt7996_mac_update_stats(phy); 2384 } 2385 2386 mutex_unlock(&mphy->dev->mutex); 2387 2388 mt76_tx_status_check(mphy->dev, false); 2389 2390 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 2391 MT7996_WATCHDOG_TIME); 2392 } 2393 2394 static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy) 2395 { 2396 struct mt7996_dev *dev = phy->dev; 2397 2398 if (phy->rdd_state & BIT(0)) 2399 
mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}

/* Start radar detection on @chain: map the regulatory DFS region to the
 * firmware region code, issue RDD_START, then enable detection mode.
 * Returns 0 or a negative MCU error.
 */
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		/* FCC rules are the fallback for unset/unknown regions */
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

/* Begin the CAC period and start the RDD chain(s) for the current
 * channel; 160 and 80+80 MHz widths need a second chain.  Records the
 * started chains in phy->rdd_state.
 */
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		/* second segment is monitored by chain 1 */
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

/* Program the region-specific radar pattern and pulse thresholds into
 * firmware.  Returns -EINVAL for regions without a spec table.
 */
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		/* FCC additionally needs the type-5 long-pulse threshold */
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

/* Drive the per-phy DFS state machine between DISABLED, CAC and ACTIVE
 * based on the current chandef; starts/stops detectors and ends CAC as
 * needed.  Returns 0 on success or a negative error (state is reset to
 * UNKNOWN if ending CAC fails, forcing a re-init next time).
 */
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	/* unknown previous state: stop everything before reconfiguring */
	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

/* Scale a TWT duration value for TSF scheduling arithmetic.
 * NOTE(review): <<8 multiplies by 256, matching the 256us duration unit
 * enforced in mt7996_mac_check_twt_req - confirm the intended unit.
 */
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

/* Insert @flow into dev->twt_list so scheduled service periods do not
 * overlap, and return the start TSF chosen for it (0 when it becomes the
 * first entry).
 */
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration =
flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	/* walk the schedule looking for a gap large enough after iter */
	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

/* Validate a TWT setup request against the subset this driver supports:
 * individual (non-broadcast), 256us duration unit, implicit agreements,
 * and a wake interval at least as long as the service period.
 * Returns 0 if acceptable, -EOPNOTSUPP otherwise.
 */
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	/* wake interval = mantissa * 2^exp must cover the SP duration */
	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

/* mac80211 TWT setup handler: validate the request, allocate a flow id
 * and agreement table slot, schedule the flow's start TSF for
 * request/suggest commands, and push the agreement to the MCU.  The
 * setup command written back into req_type is ACCEPT on success, REJECT
 * on any failure path.
 */
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	/* no free agreement table entries */
	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	/* this station already uses all of its per-sta flow slots */
	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	/* lowest free flow id; echoed back to the peer in req_type */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		/* first interval boundary at or after the current TSF */
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	/* commit the allocations only once the MCU accepted the flow */
	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	/* keep only the control bits we understand in the response */
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

/* Tear down one TWT flow of @msta: tell the MCU to delete the agreement
 * and, only if that succeeds, release the flow id and table slot.
 * Caller must hold dev->mt76.mutex.
 */
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}