/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>

#include "mt76x2u.h"
#include "eeprom.h"
#include "../mt76x02_phy.h"
#include "../mt76x02_usb.h"

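/* Configure the USB DMA engine: enable rx/tx bulk transfers and rx
 * drop-or-pad, while keeping bulk rx aggregation disabled (see below).
 */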
static void mt76x2u_init_dma(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));

	val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
	       MT_USB_DMA_CFG_RX_BULK_EN |
	       MT_USB_DMA_CFG_TX_BULK_EN;

	/* disable AGGR_BULK_RX in order to receive one
	 * frame in each rx urb and avoid copies
	 */
	val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
}

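/* RF power-on patch sequence. The vendor CFG register offsets, bit masks
 * and delays below are otherwise undocumented magic values, presumably
 * taken from the reference initialization code.
 */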
static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
{
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
	udelay(1);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);

	mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
	udelay(1);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
	usleep_range(150, 200);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
	usleep_range(50, 100);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
}

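/* Power up a single RF frontend; @unit selects the chain, whose control
 * bits sit in the upper byte (shift by 8) of the same CFG register.
 */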
static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
{
	int shift = unit ? 8 : 0;
	u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;

	/* Enable RF BG */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
	usleep_range(10, 20);

	/* Enable RFDIG LDO/AFE/ABB/ADDA */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
	usleep_range(10, 20);

	/* Switch RFDIG power to internal LDO */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
	usleep_range(10, 20);

	mt76x2u_power_on_rf_patch(dev);

	mt76_set(dev, 0x530, 0xf);
}

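/* Bring up the WLAN power domain: switch on the WL MTCMOS, wait for the
 * power-up acknowledgment, release the AD/DA power down, enable the WLAN
 * function, release the BBP reset and finally power on both RF frontends.
 */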
static void mt76x2u_power_on(struct mt76x02_dev *dev)
{
	u32 val;

	/* Turn on WL MTCMOS */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
		 MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);

	val = MT_WLAN_MTC_CTRL_STATE_UP |
	      MT_WLAN_MTC_CTRL_PWR_ACK |
	      MT_WLAN_MTC_CTRL_PWR_ACK_S;

	mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
	usleep_range(10, 20);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	usleep_range(10, 20);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);

	/* Release AD/DA power down */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));

	/* WLAN function enable */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));

	/* Release BBP software reset */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));

	mt76x2u_power_on_rf(dev, 0);
	mt76x2u_power_on_rf(dev, 1);
}

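/* Read the EEPROM content out of the chip through vendor registers, 32 bits
 * at a time, into a host buffer and parse the hardware capabilities from it.
 */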
static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
{
	u32 val, i;

	dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
					     MT7612U_EEPROM_SIZE,
					     GFP_KERNEL);
	dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
	if (!dev->mt76.eeprom.data)
		return -ENOMEM;

	for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
		val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
		put_unaligned_le32(val, dev->mt76.eeprom.data + i);
	}

	mt76x02_eeprom_parse_hw_cap(dev);
	return 0;
}

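/* Allocate the common mt76 device structure and hook up the USB tx/rx
 * driver ops.
 */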
struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
{
	static const struct mt76_driver_ops drv_ops = {
		.tx_prepare_skb = mt76x02u_tx_prepare_skb,
		.tx_complete_skb = mt76x02u_tx_complete_skb,
		.tx_status_data = mt76x02_tx_status_data,
		.rx_skb = mt76x02_queue_rx_skb,
	};
	struct mt76x02_dev *dev;
	struct mt76_dev *mdev;

	mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
	if (!mdev)
		return NULL;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mdev->dev = pdev;
	mdev->drv = &drv_ops;

	return dev;
}

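/* Program the beacon slot offsets; each register packs four byte-wide
 * offsets for consecutive beacon slots.
 */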
static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev)
{
	mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
	mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
	mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
	mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
}

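/* Full hardware bring-up: power on the chip, load the MCU firmware,
 * configure USB DMA, reset the MAC, initialize the WCID and key tables
 * and leave the MAC stopped.
 */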
int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
	const struct mt76_wcid_addr addr = {
		.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
		.ba_mask = 0,
	};
	int i, err;

	mt76x2_reset_wlan(dev, true);
	mt76x2u_power_on(dev);

	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	err = mt76x2u_mcu_fw_init(dev);
	if (err < 0)
		return err;

	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
		return -EIO;

	/* wait for asic ready after fw load. */
	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
	mt76_wr(dev, MT_TSO_CTRL, 0);

	mt76x2u_init_dma(dev);

	err = mt76x2u_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x2u_mac_reset(dev);
	if (err < 0)
		return err;

	mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);

	mt76x2u_init_beacon_offsets(dev);

	if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
		return -ETIMEDOUT;

	/* reset wcid table */
	for (i = 0; i < 256; i++)
		mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
			     sizeof(struct mt76_wcid_addr));

	/* reset shared key table and pairwise key table */
	for (i = 0; i < 4; i++)
		mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
	for (i = 0; i < 256; i++)
		mt76_wr(dev, MT_WCID_ATTR(i), 1);

	mt76_clear(dev, MT_BEACON_TIME_CFG,
		   MT_BEACON_TIME_CFG_TIMER_EN |
		   MT_BEACON_TIME_CFG_SYNC_MODE |
		   MT_BEACON_TIME_CFG_TBTT_EN |
		   MT_BEACON_TIME_CFG_BEACON_TX);

	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);

	err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
	if (err < 0)
		return err;

	mt76x02_phy_set_rxpath(dev);
	mt76x02_phy_set_txdac(dev);

	return mt76x2u_mac_stop(dev);
}

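/* Top-level registration: read the EEPROM, allocate the USB queues, run the
 * hardware initialization and register the device with mac80211.
 */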
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	int err;

	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
	mt76x02_init_device(dev);

	err = mt76x2u_init_eeprom(dev);
	if (err < 0)
		return err;

	err = mt76u_alloc_queues(&dev->mt76);
	if (err < 0)
		goto fail;

	err = mt76u_mcu_init_rx(&dev->mt76);
	if (err < 0)
		goto fail;

	err = mt76x2u_init_hardware(dev);
	if (err < 0)
		goto fail;

	err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
				   ARRAY_SIZE(mt76x02_rates));
	if (err)
		goto fail;

	/* check hw sg support in order to enable AMSDU */
	if (mt76u_check_sg(&dev->mt76))
		hw->max_tx_fragments = MT_SG_MAX_SIZE;
	else
		hw->max_tx_fragments = 1;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	mt76x02_init_debugfs(dev);
	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);

	return 0;

fail:
	mt76x2u_cleanup(dev);
	return err;
}

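/* Stop tx status reporting and the periodic calibration/MAC work, then
 * halt the MAC.
 */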
void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
	mt76u_stop_stat_wk(&dev->mt76);
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mac_work);
	mt76x2u_mac_stop(dev);
}

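/* Turn the radio off and release the USB queues and MCU resources. */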
void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
	mt76x02_mcu_set_radio_state(dev, false, false);
	mt76x2u_stop_hw(dev);
	mt76u_queues_deinit(&dev->mt76);
	mt76u_mcu_deinit(&dev->mt76);
}