/*
 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef MT76X0U_H
#define MT76X0U_H

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/completion.h>
#include <net/mac80211.h>
#include <linux/debugfs.h>

#include "../mt76.h"
#include "../mt76x02_regs.h"
#include "../mt76x02_mac.h"

#define MT_CALIBRATE_INTERVAL		(4 * HZ)

#define MT_FREQ_CAL_INIT_DELAY		(30 * HZ)
#define MT_FREQ_CAL_CHECK_INTERVAL	(10 * HZ)
#define MT_FREQ_CAL_ADJ_INTERVAL	(HZ / 2)

#define MT_BBP_REG_VERSION		0x00

#define MT_USB_AGGR_SIZE_LIMIT		21 /* in units of 1024 B */
#define MT_USB_AGGR_TIMEOUT		0x80 /* in units of 33 ns */
#define MT_RX_ORDER			3
#define MT_RX_URB_SIZE			(PAGE_SIZE << MT_RX_ORDER)
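
/*
 * Worked example (illustrative): with 4 KiB pages, MT_RX_URB_SIZE is
 * PAGE_SIZE << MT_RX_ORDER = 4096 << 3 = 32768 bytes per RX URB, which
 * leaves room for the 21 KiB USB aggregation limit defined above.
 */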

struct mt76x0_dma_buf {
	struct urb *urb;
	void *buf;
	dma_addr_t dma;
	size_t len;
};

struct mac_stats {
	u64 rx_stat[6];
	u64 tx_stat[6];
	u64 aggr_stat[2];
	u64 aggr_n[32];
	u64 zero_len_del[2];
};

#define N_RX_ENTRIES	16
struct mt76x0_rx_queue {
	struct mt76x0_dev *dev;

	struct mt76x0_dma_buf_rx {
		struct urb *urb;
		struct page *p;
	} e[N_RX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int pending;
};

#define N_TX_ENTRIES	64

struct mt76x0_tx_queue {
	struct mt76x0_dev *dev;

	struct mt76x0_dma_buf_tx {
		struct urb *urb;
		struct sk_buff *skb;
	} e[N_TX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int used;
	unsigned int fifo_seq;
};
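
/*
 * Ring-buffer sketch (an illustration based on the counters above, not a
 * verbatim copy of the driver): both queues index e[] with @start and @end
 * wrapping at @entries. A producer claiming a TX slot would plausibly do:
 *
 *	e = &q->e[q->end];
 *	q->end = (q->end + 1) % q->entries;
 *	q->used++;
 *
 * while the URB completion path consumes entries from @start.
 */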

/* WCID allocation:
 *     0: mcast wcid
 *     1: bssid wcid
 *  1...: STAs
 * ...7e: group wcids
 *    7f: reserved
 */
#define N_WCIDS		128
#define GROUP_WCID(idx)	(254 - (idx))
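
/*
 * Illustrative expansion (assuming idx is a small per-interface index):
 * GROUP_WCID(0) == 254 and GROUP_WCID(7) == 247, i.e. group wcids are
 * assigned downwards from near the top of the 8-bit hardware WCID range.
 */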

struct mt76x0_eeprom_params;

#define MT_EE_TEMPERATURE_SLOPE		39
#define MT_FREQ_OFFSET_INVALID		(-128)

/* Vendor request address: type flags ORed into the register offset */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
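
/*
 * Example (illustrative): MT_VEND_ADDR(EEPROM, 0x10) expands to
 * MT_VEND_TYPE_EEPROM | 0x10; MT_VEND_TYPE_MASK then lets the vendor
 * request path tell EEPROM and CFG accesses apart from plain register
 * offsets.
 */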

enum mt_bw {
	MT_BW_20,
	MT_BW_40,
};

/**
 * struct mt76x0_dev - adapter structure
 * @lock:		protects @wcid->tx_rate.
 * @mac_lock:		serializes mac80211's TX status and RX paths.
 * @tx_lock:		protects @tx_q and changes of MT76_STATE_*_STATS
 *			flags in @state.
 * @rx_lock:		protects @rx_q.
 * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
 * @mutex:		ensures exclusive access from mac80211 callbacks.
 * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
 *			(accesses to RF and BBP).
 * @hw_atomic_mutex:	ensures exclusive access to HW during critical
 *			operations (power management, channel switch).
 */
struct mt76x0_dev {
	struct mt76_dev mt76; /* must be first */

	struct mutex usb_ctrl_mtx;
	u8 data[32];

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;

	unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
	unsigned long vif_mask;

	struct delayed_work cal_work;
	struct delayed_work mac_work;

	struct workqueue_struct *stat_wq;
	struct delayed_work stat_work;

	struct mt76_wcid *mon_wcid;
	struct mt76_wcid __rcu *wcid[N_WCIDS];

	spinlock_t mac_lock;

	const u16 *beacon_offsets;

	u8 macaddr[ETH_ALEN];
	struct mt76x0_eeprom_params *ee;

	struct mutex reg_atomic_mutex;
	struct mutex hw_atomic_mutex;

	u32 debugfs_reg;

	/* TX */
	spinlock_t tx_lock;
	struct mt76x0_tx_queue *tx_q;
	struct sk_buff_head tx_skb_done;

	atomic_t avg_ampdu_len;

	/* RX */
	spinlock_t rx_lock;
	struct mt76x0_rx_queue rx_q;

	/* Connection monitoring things */
	spinlock_t con_mon_lock;
	u8 ap_bssid[ETH_ALEN];

	s8 bcn_freq_off;
	u8 bcn_phy_mode;

	int avg_rssi; /* starts at 0 and converges */

	u8 agc_save;
	u16 chainmask;

	struct mac_stats stats;
};
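
/*
 * Locking sketch (illustrative assumption, not lifted from the driver): an
 * indirect RF/BBP access is a multi-step register sequence, so callers are
 * expected to hold @reg_atomic_mutex for its duration, e.g.:
 *
 *	mutex_lock(&dev->reg_atomic_mutex);
 *	... write address/value registers, poll for completion ...
 *	mutex_unlock(&dev->reg_atomic_mutex);
 */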

struct mt76x0_wcid {
	u8 idx;
	u8 hw_key_idx;

	u16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
};

struct mt76x0_rxwi;

extern const struct ieee80211_ops mt76x0_ops;

static inline bool is_mt7610e(struct mt76x0_dev *dev)
{
	/* TODO */
	return false;
}

void mt76x0_init_debugfs(struct mt76x0_dev *dev);

/* Compatibility with mt76 */
#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
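
/*
 * Example (illustrative): mt76_rmw_field(dev, reg, GENMASK(7, 4), 0x3)
 * read-modify-writes bits 7:4 of @reg to 0x3, because FIELD_PREP() shifts
 * the value into the bit position described by the mask.
 */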

int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
			    const struct mt76_reg_pair *data, int len);
int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
			  struct mt76_reg_pair *data, int len);
int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
			     const u32 *data, int n);
void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);

/* Init */
struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
int mt76x0_init_hardware(struct mt76x0_dev *dev);
int mt76x0_register_device(struct mt76x0_dev *dev);
void mt76x0_cleanup(struct mt76x0_dev *dev);
void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);

int mt76x0_mac_start(struct mt76x0_dev *dev);
void mt76x0_mac_stop(struct mt76x0_dev *dev);

/* PHY */
void mt76x0_phy_init(struct mt76x0_dev *dev);
int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
void mt76x0_agc_save(struct mt76x0_dev *dev);
void mt76x0_agc_restore(struct mt76x0_dev *dev);
int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
			    struct cfg80211_chan_def *chandef);
void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
			       struct ieee80211_bss_conf *info);

/* MAC */
void mt76x0_mac_work(struct work_struct *work);
void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
				int ht_mode);
void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);

/* TX */
void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb);
int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    u16 queue, const struct ieee80211_tx_queue_params *params);
void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
void mt76x0_tx_stat(struct work_struct *work);

/* util */
void mt76x0_remove_hdr_pad(struct sk_buff *skb);
int mt76x0_insert_hdr_pad(struct sk_buff *skb);

int mt76x0_dma_init(struct mt76x0_dev *dev);
void mt76x0_dma_cleanup(struct mt76x0_dev *dev);

int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q);
#endif /* MT76X0U_H */