// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN			4

#define MT_FCE_DMA_ADDR			0x0230
#define MT_FCE_DMA_LEN			0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8

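/*
 * Decode a burst register-read response: each 8-byte entry holds the
 * absolute register address followed by its value, both little endian.
 */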
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i;

	WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

	for (i = 0; i < usb->mcu.rp_len; i++) {
		u32 reg = get_unaligned_le32(data + 8 * i) - usb->mcu.base;
		u32 val = get_unaligned_le32(data + 8 * i + 4);

		WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
		usb->mcu.rp[i].value = val;
	}
}

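/*
 * Poll the command-response bulk endpoint (up to five attempts, 300 ms
 * each) for a CMD_DONE event carrying the expected sequence number.
 */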
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	u8 *data = usb->mcu.data;
	int i, len, ret;
	u32 rxfce;

	for (i = 0; i < 5; i++) {
		ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
				     300, MT_EP_IN_CMD_RESP);
		if (ret == -ETIMEDOUT)
			continue;
		if (ret)
			goto out;

		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);

		rxfce = get_unaligned_le32(data);
		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}
out:
	dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
	return ret;
}

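/*
 * Prepend the USB DMA info header to the command skb and push it out on
 * the in-band command endpoint, optionally waiting for the MCU to ack the
 * command with a matching CMD_DONE event.
 */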
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	u8 seq = 0;
	u32 info;
	int ret;

	if (test_bit(MT76_REMOVED, &dev->phy.state)) {
		ret = 0;
		goto out;
	}

	if (wait_resp) {
		seq = ++dev->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
			     MT_EP_OUT_INBAND_CMD);
	if (ret)
		goto out;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

out:
	consume_skb(skb);

	return ret;
}

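/* Build a command skb from a flat buffer and send it under the MCU mutex. */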
static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
		      int len, bool wait_resp)
{
	struct sk_buff *skb;
	int err;

	skb = mt76_mcu_msg_alloc(dev, data, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&dev->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&dev->mcu.mutex);

	return err;
}

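/* Append a 32-bit value to the skb tail in little-endian byte order. */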
static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

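/*
 * Write a list of register/value pairs with the CMD_RANDOM_WRITE MCU
 * command, splitting the list into chunks that fit one in-band packet.
 * Only the final chunk waits for the MCU response.
 */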
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	const int CMD_RANDOM_WRITE = 12;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);
	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	mutex_unlock(&dev->mcu.mutex);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

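/*
 * Read a list of registers with the CMD_RANDOM_READ MCU command. The
 * request must fit in a single in-band packet; the response is decoded
 * into @data by mt76x02u_multiple_mcu_reads() through usb->mcu.rp.
 */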
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&dev->mcu.mutex);

	return ret;
}

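/* Ask the device to reset its MCU firmware via a vendor control request. */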
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

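/*
 * Upload a single firmware chunk: program the FCE DMA address and length,
 * push the padded payload on the in-band command endpoint and bump the
 * TX descriptor index so the MCU consumes it.
 */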
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
			    const void *fw_data, int len, u32 dst_addr)
{
	__le32 info;
	u32 val;
	int err, data_len;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	data_len = MT_CMD_HDR_LEN + len + sizeof(info);

	err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
			     MT_EP_OUT_INBAND_CMD);
	if (err) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
		return err;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

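/*
 * Split a firmware image into chunks that fit the max_payload transfer
 * buffer and upload them sequentially starting at the given destination
 * offset.
 */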
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int len, err = 0, pos = 0, max_len = max_payload - 8;
	u8 *buf;

	buf = kmalloc(max_payload, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	kfree(buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

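/* Install the in-band command (USB) MCU ops for mt76x02 devices. */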
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.headroom = MT_CMD_HDR_LEN,
		.tailroom = 8,
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_parse_response = mt76x02_mcu_parse_response,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");