/*
 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef MT76X0U_H
#define MT76X0U_H

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/completion.h>
#include <net/mac80211.h>
#include <linux/debugfs.h>

#include "../mt76.h"
#include "regs.h"

#define MT_CALIBRATE_INTERVAL		(4 * HZ)

#define MT_FREQ_CAL_INIT_DELAY		(30 * HZ)
#define MT_FREQ_CAL_CHECK_INTERVAL	(10 * HZ)
#define MT_FREQ_CAL_ADJ_INTERVAL	(HZ / 2)
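
/*
 * Illustrative sketch, not taken from this driver's .c files: interval
 * macros such as MT_CALIBRATE_INTERVAL are normally consumed by rearming a
 * delayed work item from its own handler, e.g.
 *
 *	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
 *				     MT_CALIBRATE_INTERVAL);
 *
 * where cal_work is a member of struct mt76x0_dev below and hw comes from
 * struct mt76_dev in mt76.h.
 */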

#define MT_BBP_REG_VERSION		0x00

#define MT_USB_AGGR_SIZE_LIMIT		21 /* * 1024B */
#define MT_USB_AGGR_TIMEOUT		0x80 /* * 33ns */
#define MT_RX_ORDER			3
#define MT_RX_URB_SIZE			(PAGE_SIZE << MT_RX_ORDER)
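
/*
 * For reference, derived from the definitions above: the USB aggregation
 * limit is 21 * 1024B = 21504 bytes, the aggregation timeout is
 * 0x80 * 33ns = ~4.2us, and each RX URB buffer spans PAGE_SIZE << 3,
 * i.e. 32KiB with 4KiB pages.
 */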

struct mt76x0_dma_buf {
	struct urb *urb;
	void *buf;
	dma_addr_t dma;
	size_t len;
};

struct mt76x0_mcu {
	struct mutex mutex;

	u8 msg_seq;

	struct mt76x0_dma_buf resp;
	struct completion resp_cmpl;

	struct mt76_reg_pair *reg_pairs;
	unsigned int reg_pairs_len;
	u32 reg_base;
	bool burst_read;
};

struct mac_stats {
	u64 rx_stat[6];
	u64 tx_stat[6];
	u64 aggr_stat[2];
	u64 aggr_n[32];
	u64 zero_len_del[2];
};

#define N_RX_ENTRIES	16
struct mt76x0_rx_queue {
	struct mt76x0_dev *dev;

	struct mt76x0_dma_buf_rx {
		struct urb *urb;
		struct page *p;
	} e[N_RX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int pending;
};

#define N_TX_ENTRIES	64

struct mt76x0_tx_queue {
	struct mt76x0_dev *dev;

	struct mt76x0_dma_buf_tx {
		struct urb *urb;
		struct sk_buff *skb;
	} e[N_TX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int used;
	unsigned int fifo_seq;
};
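
/*
 * Illustrative sketch only (the actual queue handling lives in the .c
 * files): with the start/end/used fields above, one common ring-buffer
 * convention is to treat "end" as the producer index and "start" as the
 * consumer index, e.g. when queueing an entry:
 *
 *	e = &q->e[q->end];
 *	q->end = (q->end + 1) % q->entries;
 *	q->used++;
 */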

/* WCID allocation:
 *     0: mcast wcid
 *     1: bssid wcid
 *  1...: STAs
 * ...7e: group wcids
 *    7f: reserved
 */
#define N_WCIDS		128
#define GROUP_WCID(idx)	(254 - idx)
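
/*
 * Illustrative sketch (hypothetical helper, not part of this header): STA
 * WCIDs are tracked in the wcid_mask bitmap of struct mt76x0_dev below, so
 * allocating a free index can be done with the standard bitmap helpers:
 *
 *	idx = find_first_zero_bit(dev->wcid_mask, N_WCIDS);
 *	if (idx < N_WCIDS)
 *		set_bit(idx, dev->wcid_mask);
 */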

struct mt76x0_eeprom_params;

#define MT_EE_TEMPERATURE_SLOPE		39
#define MT_FREQ_OFFSET_INVALID		-128

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
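
/*
 * Example: MT_VEND_ADDR(EEPROM, 4) expands to (MT_VEND_TYPE_EEPROM | 4).
 * The type bit selects the kind of vendor request, and the plain offset can
 * be recovered by masking it back out with ~MT_VEND_TYPE_MASK.
 */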

enum mt_temp_mode {
	MT_TEMP_MODE_NORMAL,
	MT_TEMP_MODE_HIGH,
	MT_TEMP_MODE_LOW,
};

enum mt_bw {
	MT_BW_20,
	MT_BW_40,
};

/**
 * struct mt76x0_dev - adapter structure
 * @lock:		protects @wcid->tx_rate.
 * @mac_lock:		locks out mac80211's tx status and rx paths.
 * @tx_lock:		protects @tx_q and changes of MT76_STATE_*_STATS
 *			flags in @state.
 * @rx_lock:		protects @rx_q.
 * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
 * @mutex:		ensures exclusive access from mac80211 callbacks.
 * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
 *			(accesses to RF and BBP).
 * @hw_atomic_mutex:	ensures exclusive access to HW during critical
 *			operations (power management, channel switch).
 */
struct mt76x0_dev {
	struct mt76_dev mt76; /* must be first */

	struct mutex mutex;

	struct mutex usb_ctrl_mtx;
	u8 data[32];

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;

	unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
	unsigned long vif_mask;

	struct mt76x0_mcu mcu;

	struct delayed_work cal_work;
	struct delayed_work mac_work;

	struct workqueue_struct *stat_wq;
	struct delayed_work stat_work;

	struct mt76_wcid *mon_wcid;
	struct mt76_wcid __rcu *wcid[N_WCIDS];

	spinlock_t mac_lock;

	const u16 *beacon_offsets;

	u8 macaddr[ETH_ALEN];
	struct mt76x0_eeprom_params *ee;

	struct mutex reg_atomic_mutex;
	struct mutex hw_atomic_mutex;

	u32 rxfilter;
	u32 debugfs_reg;

	/* TX */
	spinlock_t tx_lock;
	struct mt76x0_tx_queue *tx_q;
	struct sk_buff_head tx_skb_done;

	atomic_t avg_ampdu_len;

	/* RX */
	spinlock_t rx_lock;
	struct mt76x0_rx_queue rx_q;

	/* Connection monitoring things */
	spinlock_t con_mon_lock;
	u8 ap_bssid[ETH_ALEN];

	s8 bcn_freq_off;
	u8 bcn_phy_mode;

	int avg_rssi; /* starts at 0 and converges */

	u8 agc_save;
	u16 chainmask;

	struct mac_stats stats;
};

struct mt76x0_wcid {
	u8 idx;
	u8 hw_key_idx;

	u16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
};

struct mt76_vif {
	u8 idx;

	struct mt76_wcid group_wcid;
};

struct mt76_tx_status {
	u8 valid:1;
	u8 success:1;
	u8 aggr:1;
	u8 ack_req:1;
	u8 is_probe:1;
	u8 wcid;
	u8 pktid;
	u8 retry;
	u16 rate;
} __packed __aligned(2);

struct mt76_sta {
	struct mt76_wcid wcid;
	struct mt76_tx_status status;
	int n_frames;
	u16 agg_ssn[IEEE80211_NUM_TIDS];
};

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

struct mt76x0_rxwi;

extern const struct ieee80211_ops mt76x0_ops;

static inline bool is_mt7610e(struct mt76x0_dev *dev)
{
	/* TODO */
	return false;
}

void mt76x0_init_debugfs(struct mt76x0_dev *dev);

int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);

/* Compatibility with mt76 */
#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
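
/*
 * Usage sketch (MT_EXAMPLE_REG/MT_EXAMPLE_FIELD are hypothetical names, not
 * definitions from regs.h): the macro performs a read-modify-write that
 * clears the field and writes the new value, so
 *
 *	mt76_rmw_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_FIELD, 0x3);
 *
 * expands to
 *
 *	mt76_rmw(dev, MT_EXAMPLE_REG, MT_EXAMPLE_FIELD,
 *		 FIELD_PREP(MT_EXAMPLE_FIELD, 0x3));
 */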

int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
			    const struct mt76_reg_pair *data, int len);
int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
			  struct mt76_reg_pair *data, int len);
int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
			     const u32 *data, int n);
void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);

/* Init */
struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
int mt76x0_init_hardware(struct mt76x0_dev *dev);
int mt76x0_register_device(struct mt76x0_dev *dev);
void mt76x0_cleanup(struct mt76x0_dev *dev);
void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);

int mt76x0_mac_start(struct mt76x0_dev *dev);
void mt76x0_mac_stop(struct mt76x0_dev *dev);

/* PHY */
void mt76x0_phy_init(struct mt76x0_dev *dev);
int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
void mt76x0_agc_save(struct mt76x0_dev *dev);
void mt76x0_agc_restore(struct mt76x0_dev *dev);
int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
			    struct cfg80211_chan_def *chandef);
void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
			       struct ieee80211_bss_conf *info);

/* MAC */
void mt76x0_mac_work(struct work_struct *work);
void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
				int ht_mode);
void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
void
mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);

/* TX */
void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb);
int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    u16 queue, const struct ieee80211_tx_queue_params *params);
void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
void mt76x0_tx_stat(struct work_struct *work);

/* util */
void mt76x0_remove_hdr_pad(struct sk_buff *skb);
int mt76x0_insert_hdr_pad(struct sk_buff *skb);

int mt76x0_dma_init(struct mt76x0_dev *dev);
void mt76x0_dma_cleanup(struct mt76x0_dev *dev);

int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q);

#endif