/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"

struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};

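/*
 * Interface iterator: fetch a fresh beacon from mac80211 for every vif
 * that currently has beaconing enabled and write it into that vif's
 * beacon slot.
 */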
static void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct sk_buff *skb = NULL;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
	if (!skb)
		return;

	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}

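/*
 * Interface iterator: collect broadcast/multicast frames that mac80211
 * buffered for stations in power save. The more-data flag is set here and
 * cleared for the last frame of each vif in the pre-TBTT tasklet.
 */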
static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x02_dev *dev = data->dev;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}

static void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
	u32 timer_val = dev->beacon_int << 4;

	dev->tbtt_count++;

	/*
	 * The beacon timer drifts by 1us every tick; the timer is
	 * configured in 1/16 TU (64us) units.
	 */
	if (dev->tbtt_count < 63)
		return;

	/*
	 * The updated beacon interval takes effect only after two TBTTs,
	 * because at this point the original interval has already been
	 * loaded into the next TBTT_TIMER value.
	 */
	if (dev->tbtt_count == 63)
		timer_val -= 1;

	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
		       MT_BEACON_TIME_CFG_INTVAL, timer_val);

	if (dev->tbtt_count >= 64)
		dev->tbtt_count = 0;
}

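/*
 * Runs shortly before each TBTT: compensate beacon timer drift, refresh
 * the beacon templates and queue buffered multicast frames on the PSD
 * queue. The queue is kicked from the TBTT interrupt in
 * mt76x02_irq_handler().
 */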
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i, nframes;

	mt76x02_resync_beacon_timer(dev);

	data.dev = dev;
	__skb_queue_head_init(&data.q);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	do {
		nframes = skb_queue_len(&data.q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x02_add_buffered_bc, &data);
	} while (nframes != skb_queue_len(&data.q) &&
		 skb_queue_len(&data.q) < 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
				      NULL);
	}
	spin_unlock_bh(&q->lock);
}

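/* Set up a single TX DMA ring and enable its TX-done interrupt */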
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

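/* Set up a single RX DMA ring and enable its RX-done interrupt */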
static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

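/*
 * Drain the TX status FIFO filled by mt76x02_mac_poll_tx_status() and
 * report each entry to mac80211.
 */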
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

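/*
 * Deferred TX completion: process pending TX status events, clean up all
 * TX queues from the MCU queue down to the AC queues and re-enable the
 * TX-done interrupt masked in the interrupt handler.
 */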
static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
	int i;

	mt76x02_process_tx_status_fifo(dev);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
}

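/*
 * Allocate the TX status FIFO, set up the TX and pre-TBTT tasklets and
 * initialize all DMA rings: one TX ring per AC plus the PSD and MCU
 * rings, and the MCU and main RX rings.
 */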
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

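/* Re-arm the RX-done interrupt once a NAPI poll has completed */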
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

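/*
 * Top half: acknowledge all pending interrupt sources, then defer the
 * heavy lifting to NAPI (RX), tasklets (TX completion, pre-TBTT, DFS) or
 * handle it directly (TBTT kick, TX status polling).
 */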
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_TX_DONE_ALL) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT) {
		mt76x02_mac_poll_tx_status(dev, true);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
	dev->mt76.mmio.irqmask &= ~clear;
	dev->mt76.mmio.irqmask |= set;
	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);

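/*
 * Enable the MAC TX path, wait for WPDMA to become idle and then enable
 * TX/RX DMA with a burst size of 3.
 */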
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

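/*
 * Stop TX/RX DMA while preserving the burst size, endianness and header
 * segment length configuration.
 */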
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

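/*
 * Detect a stalled TX DMA engine: returns true if the hardware DMA index
 * of a non-empty AC queue has not advanced since the previous watchdog
 * check.
 */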
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = &dev->mt76.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = ioread32(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

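/*
 * Key iterator used before a firmware restart: sync the TX packet number
 * of each hardware key back from the chip so that encryption state is not
 * lost across the reset.
 */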
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

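/*
 * Tear down software state before a full firmware restart: sync key PNs,
 * remove all known stations and clear the vif and beacon masks. mac80211
 * re-adds everything after ieee80211_restart_hw().
 */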
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->vif_mask = 0;
	dev->beacon_mask = 0;
}

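/*
 * Full recovery path: stop mac80211 queues, tasklets and NAPI, reset the
 * MAC and DMA engines (optionally restarting the MCU firmware), clean up
 * all queues and bring TX/RX back up again.
 */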
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->pre_tbtt_tasklet);
	tasklet_disable(&dev->tx_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (restart)
		mt76x02_reset_state(dev);

	if (dev->beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		dev->mt76.mcu_ops->mcu_restart(&dev->mt76);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mt76);
	}
}

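/*
 * Watchdog check: trigger a full reset after MT_TX_HANG_TH consecutive
 * hang detections or after an MCU command timeout.
 */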
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	mutex_lock(&dev->mt76.mmio.mcu.mutex);
	dev->mcu_timeout = 0;
	mutex_unlock(&dev->mt76.mmio.mcu.mutex);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}