/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>

#include "mt76x2u.h"
#include "eeprom.h"
#include "../mt76x02_phy.h"
#include "../mt76x02_usb.h"

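/* Configure the USB DMA engine: enable bulk tx/rx transfers and rx
 * drop-or-pad handling; bulk rx aggregation stays disabled (see the
 * comment in the body) so every rx urb carries a single frame.
 */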
static void mt76x2u_init_dma(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));

	val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
	       MT_USB_DMA_CFG_RX_BULK_EN |
	       MT_USB_DMA_CFG_TX_BULK_EN;

	/* disable AGGR_BULK_RX in order to receive one
	 * frame in each rx urb and avoid copies
	 */
	val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
}

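/* RF power-on patch sequence: raw accesses to vendor CFG registers
 * (0x130, 0x1c, 0x14, 0x14c) with the delays required between steps.
 * The offsets and values have no symbolic definitions and are used as-is.
 */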
static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
{
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
	udelay(1);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);

	mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
	udelay(1);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
	usleep_range(150, 200);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
	usleep_range(50, 100);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
}

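/* Power up a single RF unit (0 or 1); the per-unit control bits share
 * CFG register 0x130, shifted by 8 for the second unit.
 */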
static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
{
	int shift = unit ? 8 : 0;
	u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;

	/* Enable RF BG */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
	usleep_range(10, 20);

	/* Enable RFDIG LDO/AFE/ABB/ADDA */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
	usleep_range(10, 20);

	/* Switch RFDIG power to internal LDO */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
	usleep_range(10, 20);

	mt76x2u_power_on_rf_patch(dev);

	mt76_set(dev, 0x530, 0xf);
}

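/* Power up the WLAN domain: enable the MTCMOS switch, poll for the
 * power-up acks, enable the WLAN function, release the BBP software
 * reset and finally power on both RF units.
 */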
static void mt76x2u_power_on(struct mt76x02_dev *dev)
{
	u32 val;

	/* Turn on WL MTCMOS */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
		 MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);

	val = MT_WLAN_MTC_CTRL_STATE_UP |
	      MT_WLAN_MTC_CTRL_PWR_ACK |
	      MT_WLAN_MTC_CTRL_PWR_ACK_S;

	mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
	usleep_range(10, 20);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	usleep_range(10, 20);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);

	/* Release AD/DA power down */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));

	/* WLAN function enable */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));

	/* Release BBP software reset */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));

	mt76x2u_power_on_rf(dev, 0);
	mt76x2u_power_on_rf(dev, 1);
}

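/* Cache the EEPROM contents in a host buffer, 32 bits at a time, by
 * reading the EEPROM vendor address space, then parse the hardware
 * capabilities from the cached data.
 */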
static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
{
	u32 val, i;

	dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
					     MT7612U_EEPROM_SIZE,
					     GFP_KERNEL);
	dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
	if (!dev->mt76.eeprom.data)
		return -ENOMEM;

	for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
		val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
		put_unaligned_le32(val, dev->mt76.eeprom.data + i);
	}

	mt76x02_eeprom_parse_hw_cap(dev);
	return 0;
}

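/* Allocate the mt76 device with the mt76x02 private data embedded and
 * hook up the USB tx/rx and station callbacks.
 */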
struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
{
	static const struct mt76_driver_ops drv_ops = {
		.tx_prepare_skb = mt76x02u_tx_prepare_skb,
		.tx_complete_skb = mt76x02u_tx_complete_skb,
		.tx_status_data = mt76x02_tx_status_data,
		.rx_skb = mt76x02_queue_rx_skb,
		.sta_add = mt76x02_sta_add,
		.sta_remove = mt76x02_sta_remove,
	};
	struct mt76x02_dev *dev;
	struct mt76_dev *mdev;

	mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt76x2u_ops,
				 &drv_ops);
	if (!mdev)
		return NULL;

	dev = container_of(mdev, struct mt76x02_dev, mt76);

	return dev;
}

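/* Full hardware bring-up: power on the chip, load the MCU firmware,
 * configure USB DMA, reset the MAC, clear the WCID and shared key
 * tables and program beacon/PHY defaults. The MAC is left stopped at
 * the end of the sequence.
 */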
int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
	int i, k, err;

	mt76x2_reset_wlan(dev, true);
	mt76x2u_power_on(dev);

	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	err = mt76x2u_mcu_fw_init(dev);
	if (err < 0)
		return err;

	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
		return -EIO;

	/* wait for asic ready after fw load. */
	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	mt76x2u_init_dma(dev);

	err = mt76x2u_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x2u_mac_reset(dev);
	if (err < 0)
		return err;

	mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
	dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);

	if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
		return -ETIMEDOUT;

	/* reset wcid table */
	for (i = 0; i < 256; i++)
		mt76x02_mac_wcid_setup(dev, i, 0, NULL);

	/* reset shared key table and pairwise key table */
	for (i = 0; i < 16; i++) {
		for (k = 0; k < 4; k++)
			mt76x02_mac_shared_key_setup(dev, i, k, NULL);
	}

	mt76x02_init_beacon_config(dev);

	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);

	err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
	if (err < 0)
		return err;

	mt76x02_phy_set_rxpath(dev);
	mt76x02_phy_set_txdac(dev);

	return mt76x2u_mac_stop(dev);
}

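/* Driver registration path: set up the calibration work and EEPROM,
 * allocate the USB queues, initialize the MCU rx path, run the hardware
 * init and register with mac80211; multi-fragment tx for A-MSDU is only
 * advertised when the USB host controller supports scatter-gather.
 */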
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	int err;

	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
	mt76x02_init_device(dev);

	err = mt76x2u_init_eeprom(dev);
	if (err < 0)
		return err;

	err = mt76u_alloc_queues(&dev->mt76);
	if (err < 0)
		goto fail;

	err = mt76u_mcu_init_rx(&dev->mt76);
	if (err < 0)
		goto fail;

	err = mt76x2u_init_hardware(dev);
	if (err < 0)
		goto fail;

	err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
				   ARRAY_SIZE(mt76x02_rates));
	if (err)
		goto fail;

	/* check hw sg support in order to enable AMSDU */
	if (mt76u_check_sg(&dev->mt76))
		hw->max_tx_fragments = MT_SG_MAX_SIZE;
	else
		hw->max_tx_fragments = 1;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	mt76x02_init_debugfs(dev);
	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);

	return 0;

fail:
	mt76x2u_cleanup(dev);
	return err;
}

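/* Stop the hardware: cancel the pending stat, calibration and MAC works
 * and stop the MAC.
 */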
void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
	mt76u_stop_stat_wk(&dev->mt76);
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mac_work);
	mt76x2u_mac_stop(dev);
}

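/* Full teardown: switch the radio off through the MCU, stop the
 * hardware and release the USB queues and MCU resources.
 */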
void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
	mt76x02_mcu_set_radio_state(dev, false);
	mt76x2u_stop_hw(dev);
	mt76u_queues_deinit(&dev->mt76);
	mt76u_mcu_deinit(&dev->mt76);
}