/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mt76x0.h"
#include "mcu.h"

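/*
 * mac80211 start callback: bring up the MAC, trigger a PHY calibration and
 * schedule the periodic MAC and calibration work before marking the device
 * as running.
 */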
static int mt76x0e_start(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);

	mt76x02_mac_start(dev);
	mt76x0_phy_calibrate(dev, true);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
				     MT_MAC_WORK_INTERVAL);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	mutex_unlock(&dev->mt76.mutex);

	return 0;
}

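/*
 * Cancel the periodic work, then wait for the TX and RX DMA engines to go
 * idle and disable them around a MAC stop.
 */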
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mac_work);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
		       0, 1000))
		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);

	mt76x0_mac_stop(dev);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
		       0, 1000))
		dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
}

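/* mac80211 stop callback: clear the running flag and stop the hardware. */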
static void mt76x0e_stop(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);
	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt76x0e_stop_hw(dev);
	mutex_unlock(&dev->mt76.mutex);
}

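/* No-op flush handler. */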
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	      u32 queues, bool drop)
{
}

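/* No-op TIM update handler. */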
static int
mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		bool set)
{
	return 0;
}

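/* mac80211 callbacks; most handlers are shared mt76/mt76x02 helpers. */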
static const struct ieee80211_ops mt76x0e_ops = {
	.tx = mt76x02_tx,
	.start = mt76x0e_start,
	.stop = mt76x0e_stop,
	.add_interface = mt76x02_add_interface,
	.remove_interface = mt76x02_remove_interface,
	.config = mt76x0_config,
	.configure_filter = mt76x02_configure_filter,
	.bss_info_changed = mt76x02_bss_info_changed,
	.sta_state = mt76_sta_state,
	.set_key = mt76x02_set_key,
	.conf_tx = mt76x02_conf_tx,
	.sw_scan_start = mt76x02_sw_scan,
	.sw_scan_complete = mt76x02_sw_scan_complete,
	.ampdu_action = mt76x02_ampdu_action,
	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
	.wake_tx_queue = mt76_wake_tx_queue,
	.get_survey = mt76_get_survey,
	.get_txpower = mt76_get_txpower,
	.flush = mt76x0e_flush,
	.set_tim = mt76x0e_set_tim,
	.release_buffered_frames = mt76_release_buffered_frames,
	.set_coverage_class = mt76x02_set_coverage_class,
	.set_rts_threshold = mt76x02_set_rts_threshold,
};

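/*
 * Power on the chip, bring up the MCU and DMA, apply MT7610-specific fixups
 * and register the device with mac80211.
 */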
static int mt76x0e_register_device(struct mt76x02_dev *dev)
{
	int err;

	mt76x0_chip_onoff(dev, true, false);
	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	mt76x02_dma_disable(dev);
	err = mt76x0e_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x02_dma_init(dev);
	if (err < 0)
		return err;

	err = mt76x0_init_hardware(dev);
	if (err < 0)
		return err;

	if (mt76_chip(&dev->mt76) == 0x7610) {
		u16 val;

		mt76_clear(dev, MT_COEXCFG0, BIT(0));

		val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
		if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
			mt76_set(dev, MT_XO_CTRL7, 0xc03);
	}

	mt76_clear(dev, 0x110, BIT(9));
	mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));

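	/*
	 * Enable channel busy-time accounting: TX, RX, NAV and EIFS periods
	 * all count as channel busy, with the channel timer clear mode set.
	 */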
	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	err = mt76x0_register_device(dev);
	if (err < 0)
		return err;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	return 0;
}

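/*
 * PCI probe: enable the device, map BAR0, set a 32-bit DMA mask, allocate
 * the mt76 device, hook up the shared interrupt handler and register the
 * hardware.
 */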
static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct mt76_driver_ops drv_ops = {
		.txwi_size = sizeof(struct mt76x02_txwi),
		.update_survey = mt76x02_update_channel,
		.tx_prepare_skb = mt76x02_tx_prepare_skb,
		.tx_complete_skb = mt76x02_tx_complete_skb,
		.rx_skb = mt76x02_queue_rx_skb,
		.rx_poll_complete = mt76x02_rx_poll_complete,
		.sta_ps = mt76x02_sta_ps,
		.sta_add = mt76x02_sta_add,
		.sta_remove = mt76x02_sta_remove,
	};
	struct mt76x02_dev *dev;
	struct mt76_dev *mdev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
				 &drv_ops);
	if (!mdev)
		return -ENOMEM;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mutex_init(&dev->phy_mutex);

	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);

	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);

	ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt76x0e_register_device(dev);
	if (ret < 0)
		goto error;

	return 0;

error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}

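/*
 * Common teardown: disable the pre-TBTT tasklet, power the chip down and
 * release the DMA and MCU resources.
 */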
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
	tasklet_disable(&dev->pre_tbtt_tasklet);
	mt76x0_chip_onoff(dev, false, false);
	mt76x0e_stop_hw(dev);
	mt76x02_dma_cleanup(dev);
	mt76x02_mcu_cleanup(dev);
}

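/* PCI remove: unregister from mac80211, clean up and free the hardware. */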
static void
mt76x0e_remove(struct pci_dev *pdev)
{
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);

	mt76_unregister_device(mdev);
	mt76x0e_cleanup(dev);
	ieee80211_free_hw(mdev->hw);
}

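/* Supported PCI devices: MediaTek MT7630E and MT7650E. */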
static const struct pci_device_id mt76x0e_device_table[] = {
	{ PCI_DEVICE(0x14c3, 0x7630) },
	{ PCI_DEVICE(0x14c3, 0x7650) },
	{ },
};

MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
MODULE_LICENSE("Dual BSD/GPL");

static struct pci_driver mt76x0e_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mt76x0e_device_table,
	.probe		= mt76x0e_probe,
	.remove		= mt76x0e_remove,
};

module_pci_driver(mt76x0e_driver);