/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mt76x0.h"
#include "mcu.h"

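/* mac80211 .start callback: enable the MAC, run an initial PHY calibration
 * and kick off the periodic MAC maintenance and calibration work.
 */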
static int mt76x0e_start(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mt76x02_mac_start(dev);
	mt76x0_phy_calibrate(dev, true);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	return 0;
}

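/* Cancel the periodic work and shut down TX/RX DMA, waiting for the busy
 * flags to clear before disabling each engine.
 */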
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mt76.mac_work);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
		       0, 1000))
		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);

	mt76x0_mac_stop(dev);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
		       0, 1000))
		dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
}

static void mt76x0e_stop(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt76x0e_stop_hw(dev);
}

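/* No-op .flush callback: queued frames are not explicitly drained here. */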
static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	      u32 queues, bool drop)
{
}

static const struct ieee80211_ops mt76x0e_ops = {
	.tx = mt76x02_tx,
	.start = mt76x0e_start,
	.stop = mt76x0e_stop,
	.add_interface = mt76x02_add_interface,
	.remove_interface = mt76x02_remove_interface,
	.config = mt76x0_config,
	.configure_filter = mt76x02_configure_filter,
	.bss_info_changed = mt76x02_bss_info_changed,
	.sta_state = mt76_sta_state,
	.set_key = mt76x02_set_key,
	.conf_tx = mt76x02_conf_tx,
	.sw_scan_start = mt76x02_sw_scan,
	.sw_scan_complete = mt76x02_sw_scan_complete,
	.ampdu_action = mt76x02_ampdu_action,
	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
	.wake_tx_queue = mt76_wake_tx_queue,
	.get_survey = mt76_get_survey,
	.get_txpower = mt76_get_txpower,
	.flush = mt76x0e_flush,
	.set_tim = mt76_set_tim,
	.release_buffered_frames = mt76_release_buffered_frames,
	.set_coverage_class = mt76x02_set_coverage_class,
	.set_rts_threshold = mt76x02_set_rts_threshold,
};

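/* One-time hardware bring-up: power on the chip, load the MCU firmware,
 * initialize DMA, MAC and PHY state, then register with mac80211.
 */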
static int mt76x0e_register_device(struct mt76x02_dev *dev)
{
	int err;

	mt76x0_chip_onoff(dev, true, false);
	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	mt76x02_dma_disable(dev);
	err = mt76x0e_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x02_dma_init(dev);
	if (err < 0)
		return err;

	err = mt76x0_init_hardware(dev);
	if (err < 0)
		return err;

	mt76x02e_init_beacon_config(dev);

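	/* MT7610E-specific setup: clear bit 0 of MT_COEXCFG0 and, unless the
	 * EEPROM requests extra PA I/O current, set additional bits in
	 * MT_XO_CTRL7.
	 */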
	if (mt76_chip(&dev->mt76) == 0x7610) {
		u16 val;

		mt76_clear(dev, MT_COEXCFG0, BIT(0));

		val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
		if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
			mt76_set(dev, MT_XO_CTRL7, 0xc03);
	}

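	/* Register tweaks without symbolic bit definitions here: clear bit 9
	 * of register 0x110 and set bit 13 of MT_MAX_LEN_CFG.
	 */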
	mt76_clear(dev, 0x110, BIT(9));
	mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));

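	/* Enable the channel busy-time counters, counting TX, RX, NAV and
	 * EIFS periods as busy, and clear the channel timer.
	 */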
	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	err = mt76x0_register_device(dev);
	if (err < 0)
		return err;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	return 0;
}

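/* PCI probe: map BAR0, set a 32-bit DMA mask, allocate the mt76 device,
 * hook up the shared interrupt handler and register the hardware.
 */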
static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct mt76_driver_ops drv_ops = {
		.txwi_size = sizeof(struct mt76x02_txwi),
		.tx_aligned4_skbs = true,
		.update_survey = mt76x02_update_channel,
		.tx_prepare_skb = mt76x02_tx_prepare_skb,
		.tx_complete_skb = mt76x02_tx_complete_skb,
		.rx_skb = mt76x02_queue_rx_skb,
		.rx_poll_complete = mt76x02_rx_poll_complete,
		.sta_ps = mt76x02_sta_ps,
		.sta_add = mt76x02_sta_add,
		.sta_remove = mt76x02_sta_remove,
	};
	struct mt76x02_dev *dev;
	struct mt76_dev *mdev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
				 &drv_ops);
	if (!mdev)
		return -ENOMEM;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mutex_init(&dev->phy_mutex);

	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);

	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);

	ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt76x0e_register_device(dev);
	if (ret < 0)
		goto error;

	return 0;

error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}

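/* Power down the chip and release DMA and MCU resources. */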
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	mt76x0_chip_onoff(dev, false, false);
	mt76x0e_stop_hw(dev);
	mt76x02_dma_cleanup(dev);
	mt76x02_mcu_cleanup(dev);
}

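/* PCI remove: unregister from mac80211 before tearing down the hardware and
 * freeing the device.
 */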
static void
mt76x0e_remove(struct pci_dev *pdev)
{
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);

	mt76_unregister_device(mdev);
	mt76x0e_cleanup(dev);
	mt76_free_device(mdev);
}

static const struct pci_device_id mt76x0e_device_table[] = {
	{ PCI_DEVICE(0x14c3, 0x7630) },
	{ PCI_DEVICE(0x14c3, 0x7650) },
	{ },
};

MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
MODULE_LICENSE("Dual BSD/GPL");

static struct pci_driver mt76x0e_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mt76x0e_device_table,
	.probe		= mt76x0e_probe,
	.remove		= mt76x0e_remove,
};

module_pci_driver(mt76x0e_driver);