// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>

#include "mt7615.h"
#include "../dma.h"
#include "mac.h"

void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7615_dev *dev;
		struct mt7615_txp_common *txp;
		u16 token;

		dev = container_of(mdev, struct mt7615_dev, mt76);
		txp = mt7615_txwi_to_txp(mdev, e->txwi);

		if (is_mt7615(&dev->mt76))
			token = le16_to_cpu(txp->fw.token);
		else
			token = le16_to_cpu(txp->hw.msdu_id[0]) &
				~MT_MSDU_ID_VALID;

		t = mt76_token_put(mdev, token);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}

static void
mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct mt7615_hw_txp *txp = txp_ptr;
	struct mt7615_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;
	u32 last_mask;

	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->nbuf = 1;

	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	if (is_mt7663(&dev->mt76))
		last_mask = MT_TXD_LEN_LAST;
	else
		last_mask = MT_TXD_LEN_AMSDU_LAST |
			    MT_TXD_LEN_MSDU_LAST;

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		if (i == nbuf - 1)
			len |= last_mask;

		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}

static void
mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7615_fw_txp *txp = txp_ptr;
	int nbuf = tx_info->nbuf - 1;
	int i;

	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	txp->token = cpu_to_le16(id);
	txp->rept_wds_wcid = 0xff;
}
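/* Set up the txwi descriptor and the txp (fw flavor on mt7615, hw flavor
 * otherwise) for a frame about to be handed to the DMA engine. A token id
 * is allocated so that mt7615_tx_complete_skb() can recover the skb on
 * completion; tx_info->skb is replaced with DMA_DUMMY_DATA once the
 * buffers have been recorded in the txp.
 */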
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid, id;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_sta *msta;
	void *txp;

	msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
		struct mt7615_phy *phy = &dev->phy;

		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
			phy = mdev->phy2->priv;

		spin_lock_bh(&dev->mt76.lock);
		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_get(mdev, &t);
	if (id < 0)
		return id;

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key, false);

	txp = txwi + MT_TXD_SIZE;
	memset(txp, 0, sizeof(struct mt7615_txp_common));
	if (is_mt7615(&dev->mt76))
		mt7615_write_fw_txp(dev, tx_info, txp, id);
	else
		mt7615_write_hw_txp(dev, tx_info, txp, id);

	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

void mt7615_dma_reset(struct mt7615_dev *dev)
{
	int i;

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_tx_status_check(&dev->mt76, true);

	mt7615_dma_start(dev);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);

static void
mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event)
{
	u32 reg = MT_MCU_INT_EVENT;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_MCU_INT_EVENT;

	mt76_wr(dev, reg, event);

	mt7622_trigger_hif_int(dev, true);
	mt7622_trigger_hif_int(dev, false);
}

static bool
mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7615_RESET_TIMEOUT);
	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;
	struct mt7615_dev *dev = mt7615_hw_dev(hw);

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7615_mcu_add_beacon(dev, hw, vif,
				      vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_update_vif_beacon,
					    dev->mt76.hw);

	if (!dev->mt76.phy2)
		return;

	ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_update_vif_beacon,
					    dev->mt76.phy2->hw);
}
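/* Full-chip recovery handler for dev->reset_work. It quiesces the
 * mac80211 queues, per-phy works and NAPI contexts, performs the
 * PDMA_STOPPED -> RESET_DONE -> PDMA_INIT -> RECOVERY_DONE handshake
 * with the MCU around a DMA and token-table reset, then restarts
 * everything and re-programs the beacons.
 */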
void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7615_dev *dev;
	unsigned long timeout;

	dev = container_of(work, struct mt7615_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	del_timer_sync(&dev->phy.roc_timer);
	cancel_work_sync(&dev->phy.roc_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
		del_timer_sync(&phy2->roc_timer);
		cancel_work_sync(&phy2->roc_work);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mt7615_mutex_acquire(dev);

	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

		mt7615_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt7615_update_beacons(dev);

	mt7615_mutex_release(dev);

	timeout = mt7615_get_macwork_timeout(dev);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     timeout);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work, timeout);
}