// SPDX-License-Identifier: ISC

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
#include "../trace.h"

#define MT_PSE_PAGE_SIZE	128

static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
	return ret;
}

static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

void mt7603_mac_reset_counters(struct mt7603_dev *dev)
{
	int i;

	for (i = 0; i < 2; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}

void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	int sifs;
	u32 val;

	if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	if (dev->slottime < 20)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}
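
/*
 * Initialize the WTBL1 entry for a station: program the MAC address,
 * MUAR index and admission control bit, then wipe the per-station
 * WTBL2/3/4 blocks. TX on all ACs is stopped around the WTBL2 clear,
 * apparently because the hardware accesses that block from the TX path.
 */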
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
		FIELD_PREP(MT_TX_ABORT_WCID, idx));

	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 5000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}
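
/*
 * Reset a WTBL1 entry to its default state. The WTBL2/3/4 frame and
 * entry IDs written here encode where the per-station blocks live in
 * PSE memory: each PSE page is MT_PSE_PAGE_SIZE (128) bytes, so one
 * page holds 128 / MT_WTBL2_SIZE WTBL2 entries, and likewise for
 * WTBL3/4 starting at their respective offsets. The WTBL3 entry ID is
 * doubled, presumably because the hardware indexes WTBL3 in
 * half-entry units.
 */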
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}
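
/*
 * Program the TX BA session for one TID in WTBL2 word 15. A negative
 * ba_size disables aggregation for the TID; otherwise the loop below
 * picks the largest window size index i (7 down to 1) for which
 * MT_AGG_SIZE_LIMIT(i) still fits within ba_size (0 if none fits),
 * and stores it in the per-TID window size field.
 */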
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

void mt7603_mac_sta_poll(struct mt7603_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	u32 total_airtime = 0;
	u32 airtime[4];
	u32 addr;
	int i;

	rcu_read_lock();

	while (1) {
		bool clear = false;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&dev->sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}

		msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
					poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		addr = mt7603_wtbl4_addr(msta->wcid.idx);
		for (i = 0; i < 4; i++) {
			u32 airtime_last = msta->tx_airtime_ac[i];

			msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
			airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
			airtime[i] *= 32;
			total_airtime += airtime[i];

			if (msta->tx_airtime_ac[i] & BIT(22))
				clear = true;
		}

		if (clear) {
			mt7603_wtbl_update(dev, msta->wcid.idx,
					   MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->tx_airtime_ac, 0,
			       sizeof(msta->tx_airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			struct mt76_queue *q = dev->mt76.q_tx[i].q;
			u8 qidx = q->hw_idx;
			u8 tid = ac_to_tid[i];
			u32 txtime = airtime[qidx];

			if (!txtime)
				continue;

			ieee80211_sta_register_airtime(sta, tid, txtime, 0);
		}
	}

	rcu_read_unlock();

	if (!total_airtime)
		return;

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->mphy.chan_state->cc_tx += total_airtime;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}
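
/*
 * Parse the RX descriptor in front of a received frame and fill in
 * mt76_rx_status. The descriptor starts with four mandatory words;
 * optional groups (4, 1, 2, 3) follow, each flagged in rxd0. Group 3
 * carries the RX vector with rate and per-chain RSSI information, so
 * frames without it are rejected.
 */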
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (dev->rx_ampdu_ts != rxd[12]) {
			if (!++dev->ampdu_ref)
				dev->ampdu_ref++;
		}
		dev->rx_ampdu_ts = rxd[12];

		status->ampdu_ref = dev->ampdu_ref;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mphy.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		status->signal = status->chain_signal[0];
		if (status->chains & BIT(1))
			status->signal = max(status->signal,
					     status->chain_signal[1]);

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}
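
/*
 * Convert a mac80211 tx_rate into the hardware rate value: a PHY mode
 * plus rate index, with the STBC bit set only for single-stream rates.
 * For legacy rates the PHY mode and index come pre-packed in the high
 * and low byte of the rate table's hw_value.
 */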
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}
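
/*
 * Write a new rate table for a station. Two rate sets are kept per
 * station and the active one is toggled via BIT(0) of the TSF
 * snapshot stored in rate_set_tsf; tx status events later use their
 * timestamp to tell which set a frame was sent with (see
 * mt7603_fill_txs).
 */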
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary.
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    IEEE80211_TX_RC_40_MHZ_WIDTH)
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}
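
/*
 * Install (or clear) a key in the WTBL3 block of the given wcid. WEP
 * keys occupy a 16-byte slot per key index; all other ciphers use the
 * full 32-byte key area.
 */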
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}
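
/*
 * Fill the 8-word TXWI descriptor that precedes each transmitted
 * frame: queue and length in word 0, addressing and header info in
 * words 1-2, sequence number and remaining tx count in word 3, the PN
 * for encrypted frames in words 4-5, and an optional fixed rate in
 * word 6.
 */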
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, enum mt76_txq_id qid,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_queue *q = dev->mt76.q_tx[qid].q;
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		if (vif_idx && qid >= MT_TXQ_BEACON)
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (qid >= MT_TXQ_BEACON)
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
			      sta, pid, key);

	return 0;
}
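
/*
 * Reconstruct the rate/retry chain for a tx status event. The
 * hardware only reports the final rate index and the total tx count,
 * so the per-rate counts are rebuilt from the rate set that was
 * active when the frame was sent, selected by comparing the event
 * timestamp against rate_set_tsf.
 */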
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7603_rate_set *rs;
	int first_idx = 0, last_idx;
	u32 rate_set_tsf;
	u32 final_rate;
	u32 final_rate_flags;
	bool rs_idx;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	/* each retry slot covers up to MT7603_RATE_RETRY attempts */
	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			mt7603_wtbl_set_rates(dev, sta, NULL,
					      sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7603_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mphy.sband_5g.sband;
		else
			sband = &dev->mphy.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
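
/*
 * Handle a tx status event from the hardware: look up the reporting
 * station, queue it for airtime polling, and complete the matching
 * status skb if the PID refers to one. Events without a matching skb
 * still feed rate statistics via ieee80211_tx_status_noskb().
 */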
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[4]);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	txs = le32_to_cpu(txs_data[3]);
	wcidx = FIELD_GET(MT_TXS3_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (list_empty(&msta->poll_list)) {
		spin_lock_bh(&dev->sta_poll_lock);
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct sk_buff *skb = e->skb;

	if (!e->txwi) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (qid < 4)
		dev->tx_hang_check = 0;

	mt76_tx_complete_skb(mdev, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
			 0, 1000);
}

static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	/* Reset PSE */
	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}
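
/*
 * Abort the PSE client TX path as part of watchdog recovery: flag the
 * abort, kick a WPDMA software reset, wait for the TX FIFO to drain,
 * then clear the abort state again.
 */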
void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
				   MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
	if (!is_mt7628(dev))
		return;

	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->mt76.beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);

	tasklet_disable(&dev->mt76.tx_tasklet);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7603_beacon_set_timer(dev, -1, 0);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mphy.state);
	mutex_unlock(&dev->mt76.mutex);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mphy);
}
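
/*
 * Read one word from the WPDMA debug multiplexer. The returned bits
 * are used below to detect stuck RX/TX DMA and PSE conditions; the
 * meaning of the individual bits is not documented here and appears
 * to be inherited from the vendor driver.
 */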
static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
	u32 val;

	mt76_wr(dev, MT_WPDMA_DEBUG,
		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
		MT_WPDMA_DEBUG_SEL);

	val = mt76_rr(dev, MT_WPDMA_DEBUG);
	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}

static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
		return false;

	return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}

static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		if (dma_idx == prev_dma_idx &&
		    dma_idx != readl(&q->regs->cpu_idx))
			break;
	}

	return i < 4;
}

static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;
trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	return true;
}

void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->phy.chan_state;
	state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}

void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}
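
/*
 * Periodic energy-detect CCA check: measure how much of the elapsed
 * interval the medium was flagged as ED-busy and apply hysteresis via
 * ed_trigger/ed_strong_signal before toggling strict ED/CCA mode, so
 * a single busy or idle sample does not flip the setting.
 */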
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}
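
/*
 * Dynamic sensitivity adjustment: compare PD (preamble detect) events
 * against MDRDY (valid frame) counts to estimate the false CCA rate,
 * then back off receiver sensitivity in 2 dB steps when false
 * detections are frequent and restore it when they subside or after
 * 10 seconds without adjustment. The threshold is also clamped to
 * 15 dB below the weakest connected station's average RSSI.
 */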
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	if (!dev->dynamic_sensitivity)
		return;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600 &&
	    dev->sensitivity < -100 + dev->sensitivity_limit) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}

void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mt76.mac_work.work);
	bool reset = false;
	int i, idx;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt76_update_survey(&dev->mt76);
	mt7603_edcca_check(dev);

	for (i = 0, idx = 0; i < 2; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}