/* SPDX-License-Identifier: ISC */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"

#define MT_PSE_PAGE_SIZE	128

static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
	return ret;
}

static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	int sifs;
	u32 val;

	if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	if (dev->slottime < 20)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}
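
/*
 * Note: the WTBL is split into four sub-tables. WTBL1 holds fixed-size
 * per-station entries at MT_WTBL1_BASE, while WTBL2/3/4 live in PSE
 * memory accessed through the PCIe remap window: as the address helpers
 * above show, WTBL3 starts right after the last of the MT7603_WTBL_SIZE
 * WTBL2 entries, and WTBL4 right after the last WTBL3 entry (the entry
 * sizes come from the register definitions elsewhere in the driver).
 */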
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
}

static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
		FIELD_PREP(MT_TX_ABORT_WCID, idx));

	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 5000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}
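
/*
 * Note: PSE memory is organized in MT_PSE_PAGE_SIZE byte pages
 * ("frames"). WTBL2/3/4 entries are addressed as (frame, entry) pairs,
 * so the index math below derives, for a given station index, which
 * page the entry lives in and its slot within that page. As a hedged
 * example: if a WTBL2 entry were 64 bytes, one 128-byte page would
 * hold two entries, and idx 5 would map to frame 2, entry 1.
 */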
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}

void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}
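
/*
 * Note: for group-addressed frames received from a known station, rx
 * status is attributed to the per-interface wcid rather than to the
 * station entry, so multicast traffic does not disturb per-station
 * state.
 */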
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mt76.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		status->signal = status->chain_signal[0];
		if (status->chains & BIT(1))
			status->signal = max(status->signal,
					     status->chain_signal[1]);

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}
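
/*
 * Note: the returned 16-bit rate value packs the rate index and PHY
 * mode (MT_TX_RATE_IDX/MT_TX_RATE_MODE). For legacy rates, both are
 * taken from the mac80211 hw_value encoding (mode in the high byte,
 * index in the low byte); STBC is only set for single-stream rates.
 */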
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}
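
/*
 * Note: two alternating rate sets are kept per station, selected by
 * bit 0 of the TSF timestamp captured when a set is programmed
 * (sta->rate_set_tsf). Tx status reports carry a timestamp as well,
 * which lets mt7603_fill_txs() decide whether a report refers to the
 * current set or to the previous one.
 */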
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    IEEE80211_TX_RC_40_MHZ_WIDTH)
				continue;

			rates[i].idx--;
		}
	}

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}
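
/*
 * Note: key material is written to the station's WTBL3 entry. For WEP,
 * the hardware appears to keep one 16-byte slot per key index, so the
 * write below is offset by key->keyidx * 16 and limited to 16 bytes;
 * other ciphers use the full 32-byte buffer prepared above.
 */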
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}
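
/*
 * Note: the TXWI written below spans eight 32-bit words: TXD0 carries
 * the byte count (frame length plus MT_TXD_SIZE) and hardware queue,
 * TXD1 the owner MAC/wcid and header format, TXD2/TXD3 frame type and
 * sequence control, TXD5 the packet id used to match tx status
 * reports, and TXD6 an optional fixed rate.
 */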
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, enum mt76_txq_id qid,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_queue *q = dev->mt76.q_tx[qid].q;
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		if (vif_idx && qid >= MT_TXQ_BEACON)
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (qid >= MT_TXQ_BEACON)
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
			      sta, pid, key);

	return 0;
}
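
/*
 * Note: a tx status (TXS) report gives the total transmit count and
 * the index of the last rate used. With up to MT7603_RATE_RETRY
 * attempts per rate set entry, the retry chain can be reconstructed
 * by walking from the estimated first index to the last one,
 * accumulating per-rate counts for mac80211.
 */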
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7603_rate_set *rs;
	int first_idx = 0, last_idx;
	u32 rate_set_tsf;
	u32 final_rate;
	u32 final_rate_flags;
	bool rs_idx;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			mt7603_wtbl_set_rates(dev, sta, NULL,
					      sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7603_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates))
				break;

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
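
/*
 * Note: status reports are matched to queued frames through the
 * packet id (PID) carried in TXD5. Reports without a matching skb
 * still feed the rate statistics for associated stations via
 * ieee80211_tx_status_noskb().
 */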
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[4]);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	txs = le32_to_cpu(txs_data[3]);
	wcidx = FIELD_GET(MT_TXS3_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct sk_buff *skb = e->skb;

	if (!e->txwi) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (qid < 4)
		dev->tx_hang_check = 0;

	mt76_tx_complete_skb(mdev, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
			 0, 1000);
}

static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	/* Reset PSE */
	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}
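
/*
 * Note: the PSE client TX abort below is a two-stage handshake: R_E_1
 * requests the abort and is acknowledged through R_E_1_S, then R_E_2
 * (together with a WPDMA software reset) drains the TX FIFO and is
 * acknowledged through R_E_2_S before both request bits are cleared.
 */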
void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
				   MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
	if (!is_mt7628(dev))
		return;

	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->mt76.beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mt76);

	tasklet_disable(&dev->mt76.tx_tasklet);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7603_beacon_set_timer(dev, -1, 0);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mt76.state);
	mutex_unlock(&dev->mt76.mutex);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mt76);
}

static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
	u32 val;

	mt76_wr(dev, MT_WPDMA_DEBUG,
		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
		MT_WPDMA_DEBUG_SEL);

	val = mt76_rr(dev, MT_WPDMA_DEBUG);
	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}
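
/*
 * Note: the busy checks below combine the WPDMA busy flags with probes
 * of the WPDMA debug registers; the meaning of the individual debug
 * bits is not spelled out here, so these checks are best read as
 * stall-detection heuristics for the watchdog.
 */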
static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
		return false;

	return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}

static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		if (dma_idx == prev_dma_idx &&
		    dma_idx != readl(&q->regs->cpu_idx))
			break;
	}

	return i < 4;
}

static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;
trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	return true;
}

void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;
	ktime_t cur_time;
	u32 busy;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
	busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);

	spin_lock_bh(&dev->mt76.cc_lock);
	cur_time = ktime_get_boottime();
	state->cc_busy += busy;
	state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time));
	dev->survey_time = cur_time;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}
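
/*
 * Note: the ED/CCA monitor below compares energy-detect busy time
 * against elapsed airtime; a busy ratio above 90% increments a trigger
 * counter and a lower ratio decrements it, with the counter clamped to
 * +/- MT7603_EDCCA_BLOCK_TH. Sustained triggering (or a lack of strong
 * received signals) switches the ED thresholds into strict mode, and
 * sustained idle switches them back.
 */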
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}
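
/*
 * Note: "false CCA" is estimated as the number of preamble detections
 * (PD) that never produced a valid modem-ready (MDRDY) event. Many
 * false detections raise the CCA threshold (dev->sensitivity moves up
 * in 2 dB steps starting from -92 dBm), few detections relax it again,
 * and the result is capped at the weakest associated station's average
 * RSSI minus 15 dB so real peers are not squelched.
 */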
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}

void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mt76.mac_work.work);
	bool reset = false;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt7603_update_channel(&dev->mt76);
	mt7603_edcca_check(dev);

	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}