1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc.
3  *
4  * Author: Ryder Lee <ryder.lee@mediatek.com>
5  *         Roy Luo <royluo@google.com>
6  *         Felix Fietkau <nbd@nbd.name>
7  *         Lorenzo Bianconi <lorenzo@kernel.org>
8  */
9 
10 #include <linux/etherdevice.h>
11 #include <linux/timekeeping.h>
12 
13 #include "mt7615.h"
14 #include "../dma.h"
15 #include "mac.h"
16 
17 void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
18 {
19 	if (!e->txwi) {
20 		dev_kfree_skb_any(e->skb);
21 		return;
22 	}
23 
24 	/* error path */
25 	if (e->skb == DMA_DUMMY_DATA) {
26 		struct mt76_txwi_cache *t;
27 		struct mt7615_dev *dev;
28 		struct mt7615_txp_common *txp;
29 		u16 token;
30 
31 		dev = container_of(mdev, struct mt7615_dev, mt76);
32 		txp = mt7615_txwi_to_txp(mdev, e->txwi);
33 
34 		if (is_mt7615(&dev->mt76))
35 			token = le16_to_cpu(txp->fw.token);
36 		else
37 			token = le16_to_cpu(txp->hw.msdu_id[0]) &
38 				~MT_MSDU_ID_VALID;
39 
40 		t = mt76_token_put(mdev, token);
41 		e->skb = t ? t->skb : NULL;
42 	}
43 
44 	if (e->skb)
45 		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
46 }
47 
48 static void
49 mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
50 		    void *txp_ptr, u32 id)
51 {
52 	struct mt7615_hw_txp *txp = txp_ptr;
53 	struct mt7615_txp_ptr *ptr = &txp->ptr[0];
54 	int i, nbuf = tx_info->nbuf - 1;
55 	u32 last_mask;
56 
57 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
58 	tx_info->nbuf = 1;
59 
60 	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
61 
62 	if (is_mt7663(&dev->mt76))
63 		last_mask = MT_TXD_LEN_LAST;
64 	else
65 		last_mask = MT_TXD_LEN_AMSDU_LAST |
66 			    MT_TXD_LEN_MSDU_LAST;
67 
68 	for (i = 0; i < nbuf; i++) {
69 		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
70 		u32 addr = tx_info->buf[i + 1].addr;
71 
72 		if (i == nbuf - 1)
73 			len |= last_mask;
74 
75 		if (i & 1) {
76 			ptr->buf1 = cpu_to_le32(addr);
77 			ptr->len1 = cpu_to_le16(len);
78 			ptr++;
79 		} else {
80 			ptr->buf0 = cpu_to_le32(addr);
81 			ptr->len0 = cpu_to_le16(len);
82 		}
83 	}
84 }
85 
86 static void
87 mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
88 		    void *txp_ptr, u32 id)
89 {
90 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
91 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
92 	struct ieee80211_key_conf *key = info->control.hw_key;
93 	struct ieee80211_vif *vif = info->control.vif;
94 	struct mt7615_fw_txp *txp = txp_ptr;
95 	int nbuf = tx_info->nbuf - 1;
96 	int i;
97 
98 	for (i = 0; i < nbuf; i++) {
99 		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
100 		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
101 	}
102 	txp->nbuf = nbuf;
103 
104 	/* pass partial skb header to fw */
105 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
106 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
107 	tx_info->buf[1].skip_unmap = true;
108 	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
109 
110 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
111 
112 	if (!key)
113 		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
114 
115 	if (ieee80211_is_mgmt(hdr->frame_control))
116 		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
117 
118 	if (vif) {
119 		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
120 
121 		txp->bss_idx = mvif->idx;
122 	}
123 
124 	txp->token = cpu_to_le16(id);
125 	txp->rept_wds_wcid = 0xff;
126 }
127 
128 int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
129 			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
130 			  struct ieee80211_sta *sta,
131 			  struct mt76_tx_info *tx_info)
132 {
133 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
134 	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
135 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
136 	struct ieee80211_key_conf *key = info->control.hw_key;
137 	int pid, id;
138 	u8 *txwi = (u8 *)txwi_ptr;
139 	struct mt76_txwi_cache *t;
140 	void *txp;
141 
142 	if (!wcid)
143 		wcid = &dev->mt76.global_wcid;
144 
145 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
146 
147 	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
148 		struct mt7615_phy *phy = &dev->phy;
149 
150 		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
151 			phy = mdev->phy2->priv;
152 
153 		spin_lock_bh(&dev->mt76.lock);
154 		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
155 				     msta->rates);
156 		spin_unlock_bh(&dev->mt76.lock);
157 	}
158 
159 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
160 	t->skb = tx_info->skb;
161 
162 	id = mt76_token_get(mdev, &t);
163 	if (id < 0)
164 		return id;
165 
166 	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
167 			      pid, key, false);
168 
169 	txp = txwi + MT_TXD_SIZE;
170 	memset(txp, 0, sizeof(struct mt7615_txp_common));
171 	if (is_mt7615(&dev->mt76))
172 		mt7615_write_fw_txp(dev, tx_info, txp, id);
173 	else
174 		mt7615_write_hw_txp(dev, tx_info, txp, id);
175 
176 	tx_info->skb = DMA_DUMMY_DATA;
177 
178 	return 0;
179 }
180 
/* Quiesce and restart the WPDMA engine: stop tx/rx DMA, drain every
 * pending tx/mcu queue entry, reset the rx rings and flush outstanding
 * tx status reports, then re-enable DMA via mt7615_dma_start().
 * Must only be called while tx/rx processing is stopped (see
 * mt7615_mac_reset_work()).
 */
void mt7615_dma_reset(struct mt7615_dev *dev)
{
	int i;

	/* stop both DMA directions before touching the rings */
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	/* give in-flight DMA transactions time to settle */
	usleep_range(1000, 2000);

	/* flush all data and mcu tx queues unconditionally */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	/* drop any tx status skbs still awaiting completion */
	mt76_tx_status_check(&dev->mt76, NULL, true);

	mt7615_dma_start(dev);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);
205 
206 static void
207 mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event)
208 {
209 	u32 reg = MT_MCU_INT_EVENT;
210 
211 	if (is_mt7663(&dev->mt76))
212 		reg = MT7663_MCU_INT_EVENT;
213 
214 	mt76_wr(dev, reg, event);
215 
216 	mt7622_trigger_hif_int(dev, true);
217 	mt7622_trigger_hif_int(dev, false);
218 }
219 
220 static bool
221 mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
222 {
223 	bool ret;
224 
225 	ret = wait_event_timeout(dev->reset_wait,
226 				 (READ_ONCE(dev->reset_state) & state),
227 				 MT7615_RESET_TIMEOUT);
228 	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
229 	return ret;
230 }
231 
232 static void
233 mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
234 {
235 	struct ieee80211_hw *hw = priv;
236 	struct mt7615_dev *dev = mt7615_hw_dev(hw);
237 
238 	switch (vif->type) {
239 	case NL80211_IFTYPE_MESH_POINT:
240 	case NL80211_IFTYPE_ADHOC:
241 	case NL80211_IFTYPE_AP:
242 		mt7615_mcu_add_beacon(dev, hw, vif,
243 				      vif->bss_conf.enable_beacon);
244 		break;
245 	default:
246 		break;
247 	}
248 }
249 
250 static void
251 mt7615_update_beacons(struct mt7615_dev *dev)
252 {
253 	ieee80211_iterate_active_interfaces(dev->mt76.hw,
254 		IEEE80211_IFACE_ITER_RESUME_ALL,
255 		mt7615_update_vif_beacon, dev->mt76.hw);
256 
257 	if (!dev->mt76.phy2)
258 		return;
259 
260 	ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
261 		IEEE80211_IFACE_ITER_RESUME_ALL,
262 		mt7615_update_vif_beacon, dev->mt76.phy2->hw);
263 }
264 
/* Full-chip recovery handler, scheduled when the firmware requests a
 * PDMA stop. Quiesces mac80211 and all driver workers, performs the
 * DMA/token reset handshake with the MCU, then restarts everything and
 * restores the beacon templates. The statement order mirrors the
 * firmware handshake protocol and must not be changed.
 */
void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7615_dev *dev;

	dev = container_of(work, struct mt7615_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* only act on a firmware-initiated PDMA stop request */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	/* stop periodic work and remain-on-channel handling on both
	 * phys before touching the hardware
	 */
	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	del_timer_sync(&dev->phy.roc_timer);
	cancel_work_sync(&dev->phy.roc_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
		del_timer_sync(&phy2->roc_timer);
		cancel_work_sync(&phy2->roc_work);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mt7615_mutex_acquire(dev);

	/* ack the stop request; the firmware replies with RESET_DONE */
	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

		/* drop all outstanding tx tokens and start a fresh idr */
		mt7615_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	/* restart tx scheduling and rx/tx NAPI processing */
	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	/* signal the firmware that recovery is complete and wait for it
	 * to return to normal operation
	 */
	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* beacon templates were lost across the reset */
	mt7615_update_beacons(dev);

	mt7615_mutex_release(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7615_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7615_WATCHDOG_TIME);

}
355