1 /* SPDX-License-Identifier: ISC */
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 
6 #ifndef __MT76_H
7 #define __MT76_H
8 
9 #include <linux/kernel.h>
10 #include <linux/io.h>
11 #include <linux/spinlock.h>
12 #include <linux/skbuff.h>
13 #include <linux/leds.h>
14 #include <linux/usb.h>
15 #include <linux/average.h>
16 #include <net/mac80211.h>
17 #include "util.h"
18 
19 #define MT_TX_RING_SIZE     256
20 #define MT_MCU_RING_SIZE    32
21 #define MT_RX_BUF_SIZE      2048
22 #define MT_SKB_HEAD_LEN     128
23 
24 struct mt76_dev;
25 struct mt76_phy;
26 struct mt76_wcid;
struct pci_dev;
27 
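/* register address/value pair, used for batched register reads and writes */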
28 struct mt76_reg_pair {
29 	u32 reg;
30 	u32 value;
31 };
32 
33 enum mt76_bus_type {
34 	MT76_BUS_MMIO,
35 	MT76_BUS_USB,
36 };
37 
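/*
 * Bus abstraction: register and buffer accessors implemented by the
 * MMIO and USB backends.
 */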
38 struct mt76_bus_ops {
39 	u32 (*rr)(struct mt76_dev *dev, u32 offset);
40 	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
41 	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
42 	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
43 			   int len);
44 	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
45 			  int len);
46 	int (*wr_rp)(struct mt76_dev *dev, u32 base,
47 		     const struct mt76_reg_pair *rp, int len);
48 	int (*rd_rp)(struct mt76_dev *dev, u32 base,
49 		     struct mt76_reg_pair *rp, int len);
50 	enum mt76_bus_type type;
51 };
52 
53 #define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
54 #define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
55 
56 enum mt76_txq_id {
57 	MT_TXQ_VO = IEEE80211_AC_VO,
58 	MT_TXQ_VI = IEEE80211_AC_VI,
59 	MT_TXQ_BE = IEEE80211_AC_BE,
60 	MT_TXQ_BK = IEEE80211_AC_BK,
61 	MT_TXQ_PSD,
62 	MT_TXQ_MCU,
63 	MT_TXQ_BEACON,
64 	MT_TXQ_CAB,
65 	MT_TXQ_FWDL,
66 	__MT_TXQ_MAX
67 };
68 
69 enum mt76_rxq_id {
70 	MT_RXQ_MAIN,
71 	MT_RXQ_MCU,
72 	__MT_RXQ_MAX
73 };
74 
75 struct mt76_queue_buf {
76 	dma_addr_t addr;
77 	int len;
78 };
79 
80 struct mt76_tx_info {
81 	struct mt76_queue_buf buf[32];
82 	struct sk_buff *skb;
83 	int nbuf;
84 	u32 info;
85 };
86 
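/*
 * Per-slot queue bookkeeping: TX entries track the skb and its TXWI
 * (or URB on USB), RX entries track the data buffer.
 */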
87 struct mt76_queue_entry {
88 	union {
89 		void *buf;
90 		struct sk_buff *skb;
91 	};
92 	union {
93 		struct mt76_txwi_cache *txwi;
94 		struct urb *urb;
95 	};
96 	enum mt76_txq_id qid;
97 	bool skip_buf0:1;
98 	bool schedule:1;
99 	bool done:1;
100 };
101 
102 struct mt76_queue_regs {
103 	u32 desc_base;
104 	u32 ring_size;
105 	u32 cpu_idx;
106 	u32 dma_idx;
107 } __packed __aligned(4);
108 
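/* hardware ring: descriptors, ring indices and RX buffer state */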
109 struct mt76_queue {
110 	struct mt76_queue_regs __iomem *regs;
111 
112 	spinlock_t lock;
113 	struct mt76_queue_entry *entry;
114 	struct mt76_desc *desc;
115 
116 	u16 first;
117 	u16 head;
118 	u16 tail;
119 	int ndesc;
120 	int queued;
121 	int buf_size;
122 	bool stopped;
123 
124 	u8 buf_offset;
125 	u8 hw_idx;
126 
127 	dma_addr_t desc_dma;
128 	struct sk_buff *rx_head;
129 	struct page_frag_cache rx_page;
130 };
131 
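/* software TX queue state layered on top of a hardware ring */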
132 struct mt76_sw_queue {
133 	struct mt76_queue *q;
134 
135 	struct list_head swq;
136 	int swq_queued;
137 };
138 
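/* chip/bus specific MCU (firmware) communication hooks */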
139 struct mt76_mcu_ops {
140 	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
141 			    int len, bool wait_resp);
142 	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
143 			 const struct mt76_reg_pair *rp, int len);
144 	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
145 			 struct mt76_reg_pair *rp, int len);
146 	int (*mcu_restart)(struct mt76_dev *dev);
147 };
148 
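/* queue backend operations, implemented by the DMA and USB code */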
149 struct mt76_queue_ops {
150 	int (*init)(struct mt76_dev *dev);
151 
152 	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
153 		     int idx, int n_desc, int bufsize,
154 		     u32 ring_base);
155 
156 	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
157 			    struct sk_buff *skb, struct mt76_wcid *wcid,
158 			    struct ieee80211_sta *sta);
159 
160 	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
161 				struct sk_buff *skb, u32 tx_info);
162 
163 	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
164 			 int *len, u32 *info, bool *more);
165 
166 	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
167 
168 	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
169 			   bool flush);
170 
171 	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
172 };
173 
174 enum mt76_wcid_flags {
175 	MT_WCID_FLAG_CHECK_PS,
176 	MT_WCID_FLAG_PS,
177 };
178 
179 #define MT76_N_WCIDS 128
180 
181 /* stored in ieee80211_tx_info::hw_queue */
182 #define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)
183 
184 DECLARE_EWMA(signal, 10, 8);
185 
186 #define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
187 #define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
188 #define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
189 #define MT_WCID_TX_INFO_SET		BIT(31)
190 
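/* per-station state, indexed by the hardware wireless client ID (WCID) */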
191 struct mt76_wcid {
192 	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
193 
194 	unsigned long flags;
195 
196 	struct ewma_signal rssi;
197 	int inactive_count;
198 
199 	u8 idx;
200 	u8 hw_key_idx;
201 
202 	u8 sta:1;
203 	u8 ext_phy:1;
204 
205 	u8 rx_check_pn;
206 	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
207 	u16 cipher;
208 
209 	u32 tx_info;
210 	bool sw_iv;
211 
212 	u8 packet_id;
213 };
214 
215 struct mt76_txq {
216 	struct mt76_sw_queue *swq;
217 	struct mt76_wcid *wcid;
218 
219 	struct sk_buff_head retry_q;
220 
221 	u16 agg_ssn;
222 	bool send_bar;
223 	bool aggr;
224 };
225 
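/*
 * preallocated, DMA-mapped TX descriptor (TXWI) buffer; unused entries
 * are kept on the dev->txwi_cache free list
 */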
226 struct mt76_txwi_cache {
227 	struct list_head list;
228 	dma_addr_t dma_addr;
229 
230 	struct sk_buff *skb;
231 };
232 
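/* RX reorder buffer state for one aggregation session (per TID) */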
233 struct mt76_rx_tid {
234 	struct rcu_head rcu_head;
235 
236 	struct mt76_dev *dev;
237 
238 	spinlock_t lock;
239 	struct delayed_work reorder_work;
240 
241 	u16 head;
242 	u8 size;
243 	u8 nframes;
244 
245 	u8 num;
246 
247 	u8 started:1, stopped:1, timer_pending:1;
248 
249 	struct sk_buff *reorder_buf[];
250 };
251 
252 #define MT_TX_CB_DMA_DONE		BIT(0)
253 #define MT_TX_CB_TXS_DONE		BIT(1)
254 #define MT_TX_CB_TXS_FAILED		BIT(2)
255 
256 #define MT_PACKET_ID_MASK		GENMASK(6, 0)
257 #define MT_PACKET_ID_NO_ACK		0
258 #define MT_PACKET_ID_NO_SKB		1
259 #define MT_PACKET_ID_FIRST		2
260 #define MT_PACKET_ID_HAS_RATE		BIT(7)
261 
262 #define MT_TX_STATUS_SKB_TIMEOUT	HZ
263 
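/* TX status tracking data stored in the skb control block, see mt76_tx_skb_cb() */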
264 struct mt76_tx_cb {
265 	unsigned long jiffies;
266 	u8 wcid;
267 	u8 pktid;
268 	u8 flags;
269 };
270 
271 enum {
272 	MT76_STATE_INITIALIZED,
273 	MT76_STATE_RUNNING,
274 	MT76_STATE_MCU_RUNNING,
275 	MT76_SCANNING,
276 	MT76_RESET,
277 	MT76_REMOVED,
278 	MT76_READING_STATS,
279 };
280 
281 struct mt76_hw_cap {
282 	bool has_2ghz;
283 	bool has_5ghz;
284 };
285 
286 #define MT_DRV_TXWI_NO_FREE		BIT(0)
287 #define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
288 #define MT_DRV_SW_RX_AIRTIME		BIT(2)
289 
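/* chip specific callbacks provided by each driver to the common core */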
290 struct mt76_driver_ops {
291 	u32 drv_flags;
292 	u32 survey_flags;
293 	u16 txwi_size;
294 
295 	void (*update_survey)(struct mt76_dev *dev);
296 
297 	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
298 			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
299 			      struct ieee80211_sta *sta,
300 			      struct mt76_tx_info *tx_info);
301 
302 	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
303 				struct mt76_queue_entry *e);
304 
305 	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
306 
307 	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
308 		       struct sk_buff *skb);
309 
310 	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
311 
312 	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
313 		       bool ps);
314 
315 	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
316 		       struct ieee80211_sta *sta);
317 
318 	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
319 			  struct ieee80211_sta *sta);
320 
321 	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
322 			   struct ieee80211_sta *sta);
323 };
324 
325 struct mt76_channel_state {
326 	u64 cc_active;
327 	u64 cc_busy;
328 	u64 cc_rx;
329 	u64 cc_bss_rx;
330 	u64 cc_tx;
331 };
332 
333 struct mt76_sband {
334 	struct ieee80211_supported_band sband;
335 	struct mt76_channel_state *chan;
336 };
337 
338 struct mt76_rate_power {
339 	union {
340 		struct {
341 			s8 cck[4];
342 			s8 ofdm[8];
343 			s8 stbc[10];
344 			s8 ht[16];
345 			s8 vht[10];
346 		};
347 		s8 all[48];
348 	};
349 };
350 
351 /* address space flags encoded in the register offset for USB vendor requests */
352 #define MT_VEND_TYPE_EEPROM	BIT(31)
353 #define MT_VEND_TYPE_CFG	BIT(30)
354 #define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
355 
356 #define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
357 enum mt_vendor_req {
358 	MT_VEND_DEV_MODE =	0x1,
359 	MT_VEND_WRITE =		0x2,
360 	MT_VEND_MULTI_WRITE =	0x6,
361 	MT_VEND_MULTI_READ =	0x7,
362 	MT_VEND_READ_EEPROM =	0x9,
363 	MT_VEND_WRITE_FCE =	0x42,
364 	MT_VEND_WRITE_CFG =	0x46,
365 	MT_VEND_READ_CFG =	0x47,
366 };
367 
368 enum mt76u_in_ep {
369 	MT_EP_IN_PKT_RX,
370 	MT_EP_IN_CMD_RESP,
371 	__MT_EP_IN_MAX,
372 };
373 
374 enum mt76u_out_ep {
375 	MT_EP_OUT_INBAND_CMD,
376 	MT_EP_OUT_AC_BE,
377 	MT_EP_OUT_AC_BK,
378 	MT_EP_OUT_AC_VI,
379 	MT_EP_OUT_AC_VO,
380 	MT_EP_OUT_HCCA,
381 	__MT_EP_OUT_MAX,
382 };
383 
384 #define MT_TX_SG_MAX_SIZE	8
385 #define MT_RX_SG_MAX_SIZE	1
386 #define MT_NUM_TX_ENTRIES	256
387 #define MT_NUM_RX_ENTRIES	128
388 #define MCU_RESP_URB_SIZE	1024
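/* USB bus specific state: control transfers, endpoints and MCU buffers */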
389 struct mt76_usb {
390 	struct mutex usb_ctrl_mtx;
391 	union {
392 		u8 data[128];
393 		__le32 reg_val;
394 	};
395 
396 	struct tasklet_struct rx_tasklet;
397 	struct workqueue_struct *stat_wq;
398 	struct work_struct stat_work;
399 
400 	u8 out_ep[__MT_EP_OUT_MAX];
401 	u8 in_ep[__MT_EP_IN_MAX];
402 	bool sg_en;
403 
404 	struct mt76u_mcu {
405 		struct mutex mutex;
406 		u8 *data;
407 		u32 msg_seq;
408 
409 		/* multiple reads */
410 		struct mt76_reg_pair *rp;
411 		int rp_len;
412 		u32 base;
413 		bool burst;
414 	} mcu;
415 };
416 
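/* MMIO bus specific state: register base, interrupt mask and MCU response queue */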
417 struct mt76_mmio {
418 	struct mt76e_mcu {
419 		struct mutex mutex;
420 
421 		wait_queue_head_t wait;
422 		struct sk_buff_head res_q;
423 
424 		u32 msg_seq;
425 	} mcu;
426 	void __iomem *regs;
427 	spinlock_t irq_lock;
428 	u32 irqmask;
429 };
430 
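/*
 * driver-internal RX metadata kept in the skb control block and later
 * translated into mac80211's ieee80211_rx_status
 */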
431 struct mt76_rx_status {
432 	union {
433 		struct mt76_wcid *wcid;
434 		u8 wcid_idx;
435 	};
436 
437 	unsigned long reorder_time;
438 
439 	u32 ampdu_ref;
440 
441 	u8 iv[6];
442 
443 	u8 ext_phy:1;
444 	u8 aggr:1;
445 	u8 tid;
446 	u16 seqno;
447 
448 	u16 freq;
449 	u32 flag;
450 	u8 enc_flags;
451 	u8 encoding:2, bw:3;
452 	u8 rate_idx;
453 	u8 nss;
454 	u8 band;
455 	s8 signal;
456 	u8 chains;
457 	s8 chain_signal[IEEE80211_MAX_CHAINS];
458 };
459 
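/* per-radio state; devices with a second (ext) PHY expose it via mt76_dev::phy2 */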
460 struct mt76_phy {
461 	struct ieee80211_hw *hw;
462 	struct mt76_dev *dev;
463 	void *priv;
464 
465 	unsigned long state;
466 
467 	struct cfg80211_chan_def chandef;
468 	struct ieee80211_channel *main_chan;
469 
470 	struct mt76_channel_state *chan_state;
471 	ktime_t survey_time;
472 
473 	struct mt76_sband sband_2g;
474 	struct mt76_sband sband_5g;
475 
476 	int txpower_cur;
477 	u8 antenna_mask;
478 };
479 
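/* common per-device state shared by all mt76 based drivers */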
480 struct mt76_dev {
481 	struct mt76_phy phy; /* must be first */
482 
483 	struct mt76_phy *phy2;
484 
485 	struct ieee80211_hw *hw;
486 
487 	spinlock_t lock;
488 	spinlock_t cc_lock;
489 
490 	u32 cur_cc_bss_rx;
491 
492 	struct mt76_rx_status rx_ampdu_status;
493 	u32 rx_ampdu_len;
494 	u32 rx_ampdu_ref;
495 
496 	struct mutex mutex;
497 
498 	const struct mt76_bus_ops *bus;
499 	const struct mt76_driver_ops *drv;
500 	const struct mt76_mcu_ops *mcu_ops;
501 	struct device *dev;
502 
503 	struct net_device napi_dev;
504 	spinlock_t rx_lock;
505 	struct napi_struct napi[__MT_RXQ_MAX];
506 	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
507 
508 	struct list_head txwi_cache;
509 	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
510 	struct mt76_queue q_rx[__MT_RXQ_MAX];
511 	const struct mt76_queue_ops *queue_ops;
512 	int tx_dma_idx[4];
513 
514 	struct tasklet_struct tx_tasklet;
515 	struct napi_struct tx_napi;
516 	struct delayed_work mac_work;
517 
518 	wait_queue_head_t tx_wait;
519 	struct sk_buff_head status_list;
520 
521 	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
522 	unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];
523 
524 	struct mt76_wcid global_wcid;
525 	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
526 
527 	u8 macaddr[ETH_ALEN];
528 	u32 rev;
529 
530 	u32 aggr_stats[32];
531 
532 	struct tasklet_struct pre_tbtt_tasklet;
533 	int beacon_int;
534 	u8 beacon_mask;
535 
536 	struct debugfs_blob_wrapper eeprom;
537 	struct debugfs_blob_wrapper otp;
538 	struct mt76_hw_cap cap;
539 
540 	struct mt76_rate_power rate_power;
541 
542 	enum nl80211_dfs_regions region;
543 
544 	u32 debugfs_reg;
545 
546 	struct led_classdev led_cdev;
547 	char led_name[32];
548 	bool led_al;
549 	u8 led_pin;
550 
551 	u8 csa_complete;
552 
553 	u32 rxfilter;
554 
555 	union {
556 		struct mt76_mmio mmio;
557 		struct mt76_usb usb;
558 	};
559 };
560 
561 enum mt76_phy_type {
562 	MT_PHY_TYPE_CCK,
563 	MT_PHY_TYPE_OFDM,
564 	MT_PHY_TYPE_HT,
565 	MT_PHY_TYPE_HT_GF,
566 	MT_PHY_TYPE_VHT,
567 };
568 
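/*
 * Register access helpers: the __mt76_* variants operate directly on a
 * struct mt76_dev, the mt76_* variants below take the driver private
 * structure, which embeds struct mt76_dev as ->mt76.
 */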
569 #define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
570 #define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
571 #define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
572 #define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
573 #define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)
574 
575 #define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
576 #define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)
577 
578 #define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
579 #define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
580 #define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
581 #define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
582 #define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
583 #define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
584 #define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
585 
586 #define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
587 #define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
588 #define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
589 #define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))
590 
591 #define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
592 #define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
593 
594 #define mt76_get_field(_dev, _reg, _field)		\
595 	FIELD_GET(_field, mt76_rr(_dev, _reg))
596 
597 #define mt76_rmw_field(_dev, _reg, _field, _val)	\
598 	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
599 
600 #define __mt76_rmw_field(_dev, _reg, _field, _val)	\
601 	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
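/*
 * Illustrative use of the field helpers (register/field names are
 * placeholders, not real mt76 definitions):
 *
 *	u32 gain = mt76_get_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_GAIN);
 *	mt76_rmw_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_GAIN, gain + 1);
 */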
602 
603 #define mt76_hw(dev) (dev)->mphy.hw
604 
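/* return the ieee80211_hw of the PHY a WCID index is assigned to */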
605 static inline struct ieee80211_hw *
606 mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
607 {
608 	if (wcid < MT76_N_WCIDS &&
609 	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
610 		return dev->phy2->hw;
611 
612 	return dev->phy.hw;
613 }
614 
615 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
616 		 int timeout);
617 
618 #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
619 
620 bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
621 		      int timeout);
622 
623 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
624 
625 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
626 void mt76_pci_disable_aspm(struct pci_dev *pdev);
627 
628 static inline u16 mt76_chip(struct mt76_dev *dev)
629 {
630 	return dev->rev >> 16;
631 }
632 
633 static inline u16 mt76_rev(struct mt76_dev *dev)
634 {
635 	return dev->rev & 0xffff;
636 }
637 
638 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
639 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
640 
641 #define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
642 #define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
643 #define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
644 #define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
645 #define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
646 #define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
647 #define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
648 
649 struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
650 				   const struct ieee80211_ops *ops,
651 				   const struct mt76_driver_ops *drv_ops);
652 int mt76_register_device(struct mt76_dev *dev, bool vht,
653 			 struct ieee80211_rate *rates, int n_rates);
654 void mt76_unregister_device(struct mt76_dev *dev);
655 void mt76_free_device(struct mt76_dev *dev);
656 void mt76_unregister_phy(struct mt76_phy *phy);
657 
658 struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
659 				const struct ieee80211_ops *ops);
660 int mt76_register_phy(struct mt76_phy *phy);
661 
662 struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
663 int mt76_queues_read(struct seq_file *s, void *data);
664 void mt76_seq_puts_array(struct seq_file *file, const char *str,
665 			 s8 *val, int len);
666 
667 int mt76_eeprom_init(struct mt76_dev *dev, int len);
668 void mt76_eeprom_override(struct mt76_dev *dev);
669 
670 static inline struct mt76_phy *
671 mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
672 {
673 	if (phy_ext && dev->phy2)
674 		return dev->phy2;
675 	return &dev->phy;
676 }
677 
678 static inline struct ieee80211_hw *
679 mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
680 {
681 	return mt76_dev_phy(dev, phy_ext)->hw;
682 }
683 
684 static inline u8 *
685 mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
686 {
687 	return (u8 *)t - dev->drv->txwi_size;
688 }
689 
690 /* increment with wrap-around */
691 static inline int mt76_incr(int val, int size)
692 {
693 	return (val + 1) & (size - 1);
694 }
695 
696 /* decrement with wrap-around */
697 static inline int mt76_decr(int val, int size)
698 {
699 	return (val - 1) & (size - 1);
700 }
701 
702 u8 mt76_ac_to_hwq(u8 ac);
703 
704 static inline struct ieee80211_txq *
705 mtxq_to_txq(struct mt76_txq *mtxq)
706 {
707 	void *ptr = mtxq;
708 
709 	return container_of(ptr, struct ieee80211_txq, drv_priv);
710 }
711 
712 static inline struct ieee80211_sta *
713 wcid_to_sta(struct mt76_wcid *wcid)
714 {
715 	void *ptr = wcid;
716 
717 	if (!wcid || !wcid->sta)
718 		return NULL;
719 
720 	return container_of(ptr, struct ieee80211_sta, drv_priv);
721 }
722 
723 static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
724 {
725 	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
726 		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
727 	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
728 }
729 
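/*
 * Insert two bytes of zero padding between the 802.11 header and the
 * payload when the header length is not a multiple of four, so that the
 * payload stays 4-byte aligned.
 */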
730 static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
731 {
732 	int len = ieee80211_get_hdrlen_from_skb(skb);
733 
734 	if (len % 4 == 0)
735 		return;
736 
737 	skb_push(skb, 2);
738 	memmove(skb->data, skb->data + 2, len);
739 
740 	skb->data[len] = 0;
741 	skb->data[len + 1] = 0;
742 }
743 
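/*
 * packet IDs at or above MT_PACKET_ID_FIRST (with the rate flag clear)
 * refer to an skb queued on the TX status list
 */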
744 static inline bool mt76_is_skb_pktid(u8 pktid)
745 {
746 	if (pktid & MT_PACKET_ID_HAS_RATE)
747 		return false;
748 
749 	return pktid >= MT_PACKET_ID_FIRST;
750 }
751 
752 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
753 void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
754 	     struct mt76_wcid *wcid, struct sk_buff *skb);
755 void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
756 void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
757 void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
758 void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
759 			 bool send_bar);
760 void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
761 void mt76_txq_schedule_all(struct mt76_phy *phy);
762 void mt76_tx_tasklet(unsigned long data);
763 void mt76_release_buffered_frames(struct ieee80211_hw *hw,
764 				  struct ieee80211_sta *sta,
765 				  u16 tids, int nframes,
766 				  enum ieee80211_frame_release_type reason,
767 				  bool more_data);
768 bool mt76_has_tx_pending(struct mt76_phy *phy);
769 void mt76_set_channel(struct mt76_phy *phy);
770 void mt76_update_survey(struct mt76_dev *dev);
771 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
772 		    struct survey_info *survey);
773 void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
774 
775 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
776 		       u16 ssn, u8 size);
777 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
778 
779 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
780 			 struct ieee80211_key_conf *key);
781 
782 void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
783 			 __acquires(&dev->status_list.lock);
784 void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
785 			   __releases(&dev->status_list.lock);
786 
787 int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
788 			   struct sk_buff *skb);
789 struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
790 				       struct mt76_wcid *wcid, int pktid,
791 				       struct sk_buff_head *list);
792 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
793 			     struct sk_buff_head *list);
794 void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
795 void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
796 			  bool flush);
797 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
798 		   struct ieee80211_sta *sta,
799 		   enum ieee80211_sta_state old_state,
800 		   enum ieee80211_sta_state new_state);
801 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
802 		       struct ieee80211_sta *sta);
803 
804 int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
805 
806 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
807 		     int *dbm);
808 
809 void mt76_csa_check(struct mt76_dev *dev);
810 void mt76_csa_finish(struct mt76_dev *dev);
811 
812 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
813 int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
814 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
815 int mt76_get_rate(struct mt76_dev *dev,
816 		  struct ieee80211_supported_band *sband,
817 		  int idx, bool cck);
818 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
819 		  const u8 *mac);
820 void mt76_sw_scan_complete(struct ieee80211_hw *hw,
821 			   struct ieee80211_vif *vif);
822 u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
823 			 int len);
824 
825 /* internal */
826 static inline struct ieee80211_hw *
827 mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
828 {
829 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
830 	struct ieee80211_hw *hw = dev->phy.hw;
831 
832 	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
833 		hw = dev->phy2->hw;
834 
835 	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
836 
837 	return hw;
838 }
839 
840 void mt76_tx_free(struct mt76_dev *dev);
841 struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
842 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
843 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
844 		      struct napi_struct *napi);
845 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
846 			   struct napi_struct *napi);
847 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
848 u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
849 			 int len);
850 
851 /* usb */
852 static inline bool mt76u_urb_error(struct urb *urb)
853 {
854 	return urb->status &&
855 	       urb->status != -ECONNRESET &&
856 	       urb->status != -ESHUTDOWN &&
857 	       urb->status != -ENOENT;
858 }
859 
860 /* Map hardware queues to usb endpoints */
861 static inline u8 q2ep(u8 qid)
862 {
863 	/* TODO: take management packets to queue 5 */
864 	return qid + 1;
865 }
866 
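/*
 * Synchronous bulk transfer helper for MCU commands: reads from the
 * command response endpoint when actual_len is provided, otherwise
 * writes to the in-band command endpoint.
 */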
867 static inline int
868 mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
869 	       int timeout)
870 {
871 	struct usb_interface *uintf = to_usb_interface(dev->dev);
872 	struct usb_device *udev = interface_to_usbdev(uintf);
873 	struct mt76_usb *usb = &dev->usb;
874 	unsigned int pipe;
875 
876 	if (actual_len)
877 		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
878 	else
879 		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
880 
881 	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
882 }
883 
884 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
885 			 u8 req_type, u16 val, u16 offset,
886 			 void *buf, size_t len);
887 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
888 		     const u16 offset, const u32 val);
889 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
890 void mt76u_deinit(struct mt76_dev *dev);
891 int mt76u_alloc_queues(struct mt76_dev *dev);
892 void mt76u_stop_tx(struct mt76_dev *dev);
893 void mt76u_stop_rx(struct mt76_dev *dev);
894 int mt76u_resume_rx(struct mt76_dev *dev);
895 void mt76u_queues_deinit(struct mt76_dev *dev);
896 
897 struct sk_buff *
898 mt76_mcu_msg_alloc(const void *data, int head_len,
899 		   int data_len, int tail_len);
900 void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
901 struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
902 				      unsigned long expires);
903 
904 void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
905 
906 #endif
907