// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include "mt7615.h"
#include "regs.h"
#include "mac.h"
#include "../trace.h"
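
/*
 * Per-chip register base maps, indexed by the MT_*_BASE identifiers.
 * They are installed as dev->reg_map in mt7615_mmio_probe() and used
 * when register offsets need to be remapped (see __mt7615_reg_addr()).
 */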
const u32 mt7615e_reg_map[] = {
	[MT_TOP_CFG_BASE] = 0x01000,
	[MT_HW_BASE] = 0x01000,
	[MT_PCIE_REMAP_2] = 0x02504,
	[MT_ARB_BASE] = 0x20c00,
	[MT_HIF_BASE] = 0x04000,
	[MT_CSR_BASE] = 0x07000,
	[MT_PLE_BASE] = 0x08000,
	[MT_PSE_BASE] = 0x0c000,
	[MT_CFG_BASE] = 0x20200,
	[MT_AGG_BASE] = 0x20a00,
	[MT_TMAC_BASE] = 0x21000,
	[MT_RMAC_BASE] = 0x21200,
	[MT_DMA_BASE] = 0x21800,
	[MT_PF_BASE] = 0x22000,
	[MT_WTBL_BASE_ON] = 0x23000,
	[MT_WTBL_BASE_OFF] = 0x23400,
	[MT_LPON_BASE] = 0x24200,
	[MT_MIB_BASE] = 0x24800,
	[MT_WTBL_BASE_ADDR] = 0x30000,
	[MT_PCIE_REMAP_BASE2] = 0x80000,
	[MT_TOP_MISC_BASE] = 0xc0000,
	[MT_EFUSE_ADDR_BASE] = 0x81070000,
};

const u32 mt7663e_reg_map[] = {
	[MT_TOP_CFG_BASE] = 0x01000,
	[MT_HW_BASE] = 0x02000,
	[MT_DMA_SHDL_BASE] = 0x06000,
	[MT_PCIE_REMAP_2] = 0x0700c,
	[MT_ARB_BASE] = 0x20c00,
	[MT_HIF_BASE] = 0x04000,
	[MT_CSR_BASE] = 0x07000,
	[MT_PLE_BASE] = 0x08000,
	[MT_PSE_BASE] = 0x0c000,
	[MT_PP_BASE] = 0x0e000,
	[MT_CFG_BASE] = 0x20000,
	[MT_AGG_BASE] = 0x22000,
	[MT_TMAC_BASE] = 0x24000,
	[MT_RMAC_BASE] = 0x25000,
	[MT_DMA_BASE] = 0x27000,
	[MT_PF_BASE] = 0x28000,
	[MT_WTBL_BASE_ON] = 0x29000,
	[MT_WTBL_BASE_OFF] = 0x29800,
	[MT_LPON_BASE] = 0x2b000,
	[MT_MIB_BASE] = 0x2d000,
	[MT_WTBL_BASE_ADDR] = 0x30000,
	[MT_PCIE_REMAP_BASE2] = 0x90000,
	[MT_TOP_MISC_BASE] = 0xc0000,
	[MT_EFUSE_ADDR_BASE] = 0x78011000,
};
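
/* Re-enable the RX-done interrupt for a queue once its NAPI poll completes */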
static void
mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	mt76_connac_irq_enable(mdev, MT_INT_RX_DONE(q));
}
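
/*
 * Hard IRQ handler: mask all interrupts and defer the actual processing
 * to the irq_tasklet once the device has been initialized.
 */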
static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
{
	struct mt7615_dev *dev = dev_instance;

	mt76_wr(dev, MT_INT_MASK_CSR, 0);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
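
/*
 * Interrupt bottom half: acknowledge the pending interrupt sources,
 * schedule the TX/RX NAPI contexts and, on an MCU error, kick off the
 * reset worker.
 */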
static void mt7615_irq_tasklet(struct tasklet_struct *t)
{
	struct mt7615_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
	u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
	u32 mcu_int;

	mt76_wr(dev, MT_INT_MASK_CSR, 0);

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask |= intr & MT_INT_RX_DONE_ALL;
	if (intr & tx_mcu_mask)
		mask |= tx_mcu_mask;
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);

	if (intr & tx_mcu_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & MT_INT_RX_DONE(0))
		napi_schedule(&dev->mt76.napi[0]);

	if (intr & MT_INT_RX_DONE(1))
		napi_schedule(&dev->mt76.napi[1]);

	if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD)))
		return;

	if (is_mt7663(&dev->mt76)) {
		mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS);
		mcu_int &= MT7663_MCU_CMD_ERROR_MASK;
		mt76_wr(dev, MT_MCU2HOST_INT_STATUS, mcu_int);
	} else {
		mcu_int = mt76_rr(dev, MT_MCU_CMD);
		mcu_int &= MT_MCU_CMD_ERROR_MASK;
	}

	if (!mcu_int)
		return;

	dev->reset_state = mcu_int;
	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
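
/*
 * Offsets below 0x100000 map directly onto the register window; anything
 * above is translated through the chip-specific mt7615_reg_map().
 */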
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
{
	if (addr < 0x100000)
		return addr;

	return mt7615_reg_map(dev, addr);
}
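
/*
 * Bus accessor wrappers: translate the register offset through
 * __mt7615_reg_addr() before calling the original bus ops.
 */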
static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	u32 addr = __mt7615_reg_addr(dev, offset);

	return dev->bus_ops->rr(mdev, addr);
}

static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	u32 addr = __mt7615_reg_addr(dev, offset);

	dev->bus_ops->wr(mdev, addr, val);
}

static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	u32 addr = __mt7615_reg_addr(dev, offset);

	return dev->bus_ops->rmw(mdev, addr, mask, val);
}
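
/*
 * Common MMIO probe path for the PCIe and MT7622 SoC front ends: allocate
 * the mt76 device, install the remapping bus ops, request the IRQ and
 * complete device registration.
 */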
int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
		      int irq, const u32 *map)
{
	static const struct mt76_driver_ops drv_ops = {
		/* txwi_size = txd size + txp size */
		.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_txp_common),
		.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
		.survey_flags = SURVEY_INFO_TIME_TX |
				SURVEY_INFO_TIME_RX |
				SURVEY_INFO_TIME_BSS_RX,
		.token_size = MT7615_TOKEN_SIZE,
		.tx_prepare_skb = mt7615_tx_prepare_skb,
		.tx_complete_skb = mt76_connac_tx_complete_skb,
		.rx_check = mt7615_rx_check,
		.rx_skb = mt7615_queue_rx_skb,
		.rx_poll_complete = mt7615_rx_poll_complete,
		.sta_add = mt7615_mac_sta_add,
		.sta_remove = mt7615_mac_sta_remove,
		.update_survey = mt7615_update_channel,
	};
	struct mt76_bus_ops *bus_ops;
	struct ieee80211_ops *ops;
	struct mt7615_dev *dev;
	struct mt76_dev *mdev;
	int ret;

	ops = devm_kmemdup(pdev, &mt7615_ops, sizeof(mt7615_ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
	if (!mdev)
		return -ENOMEM;

	dev = container_of(mdev, struct mt7615_dev, mt76);
	mt76_mmio_init(&dev->mt76, mem_base);
	tasklet_setup(&mdev->irq_tasklet, mt7615_irq_tasklet);

	dev->reg_map = map;
	dev->ops = ops;
	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
		    (mt76_rr(dev, MT_HW_REV) & 0xff);
	dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);

	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops) {
		ret = -ENOMEM;
		goto err_free_dev;
	}

	bus_ops->rr = mt7615_rr;
	bus_ops->wr = mt7615_wr;
	bus_ops->rmw = mt7615_rmw;
	dev->mt76.bus = bus_ops;

	mt76_wr(dev, MT_INT_MASK_CSR, 0);

	ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto err_free_dev;

	if (is_mt7663(mdev))
		mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);

	ret = mt7615_register_device(dev);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	devm_free_irq(pdev, irq, dev);
err_free_dev:
	mt76_free_device(&dev->mt76);

	return ret;
}
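
/* Register the PCIe driver and, if enabled, the MT7622 SoC WMAC driver */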
static int __init mt7615_init(void)
{
	int ret;

	ret = pci_register_driver(&mt7615_pci_driver);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
		ret = platform_driver_register(&mt7622_wmac_driver);
		if (ret)
			pci_unregister_driver(&mt7615_pci_driver);
	}

	return ret;
}
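
/* Unregister the drivers in the reverse order of registration */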
static void __exit mt7615_exit(void)
{
	if (IS_ENABLED(CONFIG_MT7622_WMAC))
		platform_driver_unregister(&mt7622_wmac_driver);
	pci_unregister_driver(&mt7615_pci_driver);
}

module_init(mt7615_init);
module_exit(mt7615_exit);
MODULE_LICENSE("Dual BSD/GPL");