/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"

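/*
 * Pre-TBTT tasklet: runs shortly before every beacon interval. It resyncs
 * the beacon timer, updates the beacon templates of all active interfaces
 * and queues buffered broadcast/multicast frames on the PSD hardware queue,
 * clearing the more-data bit on the last buffered frame of each interface.
 */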
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
				  NULL);
	}
	spin_unlock_bh(&q->lock);
}

static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
		      int idx, int n_desc)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	INIT_LIST_HEAD(&q->swq);
	q->q = hwq;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

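/* Drain the TX status FIFO and process each collected status entry */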
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

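/* TX tasklet: collect pending TX status and reschedule the TX queues */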
static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mt76);
}

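/* NAPI poll handler for TX completions */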
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       mt76.tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

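	/*
	 * Run a second cleanup pass after re-enabling the TX done interrupt,
	 * so completions that raced with napi_complete_done() are not left
	 * pending until the next interrupt fires.
	 */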
	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	tasklet_schedule(&dev->mt76.tx_tasklet);

	return 0;
}

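/*
 * Set up the TX status FIFO, allocate the TX/RX DMA rings and register the
 * TX NAPI instance. Called once during device initialization.
 */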
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
		     (unsigned long)dev);
	tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
			  mt76x02_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

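/*
 * Top-half interrupt handler: acknowledge pending sources, then dispatch
 * RX/TX completions to NAPI, beacon timer events to the pre-TBTT tasklet and
 * GP timer (radar) events to the DFS tasklet. Sources handled in NAPI
 * context are masked here and re-enabled once processing completes.
 */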
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		napi_schedule(&dev->mt76.tx_napi);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

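/* Enable TX/RX DMA once the WPDMA engine reports ready */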
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->mt76.tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

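/*
 * Stop both DMA engines while preserving the static configuration bits
 * (burst size, endianness and header segment length).
 */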
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

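/*
 * Returns true if any non-empty TX queue made no progress since the last
 * watchdog run, i.e. its hardware DMA index did not advance.
 */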
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

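/*
 * Key iterator used before a firmware restart: read the current PN back from
 * the hardware for per-station keys with hardware-generated IVs, so that it
 * is preserved when the keys are reprogrammed after the reset.
 */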
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

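/*
 * Sync key state and remove all stations before a full firmware restart;
 * mac80211 recreates this state via ieee80211_restart_hw().
 */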
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->vif_mask = 0;
	dev->mt76.beacon_mask = 0;
}

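/*
 * Full watchdog reset: quiesce mac80211, tasklets and NAPI, reset the MAC
 * and DMA engines, optionally restart the MCU firmware, then bring TX/RX
 * back up and restore beaconing and interrupt state.
 */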
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	tasklet_disable(&dev->mt76.tx_tasklet);
	napi_disable(&dev->mt76.tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mt76);
	}
}

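/*
 * Trigger a watchdog reset once the TX DMA has been stuck for MT_TX_HANG_TH
 * consecutive checks, or after an MCU command timeout.
 */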
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	mutex_lock(&dev->mt76.mmio.mcu.mutex);
	dev->mcu_timeout = 0;
	mutex_unlock(&dev->mt76.mmio.mcu.mutex);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

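/* Periodic watchdog work, rescheduled every MT_WATCHDOG_TIME */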
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}