/* xref: /openbmc/linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c (revision 05cf4fe738242183f1237f1b3a28b4479348c0a1) */
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN			4

#define MT_FCE_DMA_ADDR			0x0230
#define MT_FCE_DMA_LEN			0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8

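/*
 * Allocate an MCU command skb: reserve headroom for the 4-byte command
 * header and tailroom for DMA padding, then copy the payload into it.
 */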
static struct sk_buff *
mt76x02u_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_CMD_HDR_LEN);
	skb_put_data(skb, data, len);

	return skb;
}

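/* Parse an MCU register read response into the pending reg_pair list. */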
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		/* burst replies carry one 32-bit value per consecutive register */
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		/* non-burst replies carry 8-byte (register, value) pairs */
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}

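/*
 * Wait for the MCU response URB, resubmit the response buffer for the
 * next event and check that the received event matches the expected
 * command sequence number.
 */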
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76u_buf *buf = &usb->mcu.res;
	struct urb *urb = buf->urb;
	int i, ret;
	u32 rxfce;
	u8 *data;

	for (i = 0; i < 5; i++) {
		if (!wait_for_completion_timeout(&usb->mcu.cmpl,
						 msecs_to_jiffies(300)))
			continue;

		if (urb->status)
			return -EIO;

		data = sg_virt(&urb->sg[0]);
		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4,
						    urb->actual_length - 8);

		rxfce = get_unaligned_le32(data);
		ret = mt76u_submit_buf(dev, USB_DIR_IN,
				       MT_EP_IN_CMD_RESP,
				       buf, GFP_KERNEL,
				       mt76u_mcu_complete_urb,
				       &usb->mcu.cmpl);
		if (ret)
			return ret;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}

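/*
 * Send an MCU command over the in-band command bulk endpoint.
 * Caller must hold usb->mcu.mutex.
 */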
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;
	int ret, sent;
	u8 seq = 0;
	u32 info;

	if (!skb)
		return -EINVAL;

	if (test_bit(MT76_REMOVED, &dev->state))
		return 0;

	pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
	if (wait_resp) {
		/* skip sequence number 0, it marks commands with no response */
		seq = ++usb->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++usb->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
	if (ret)
		return ret;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

	consume_skb(skb);

	return ret;
}

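/* Serialized wrapper around __mt76x02u_mcu_send_msg(). */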
static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
		      int cmd, bool wait_resp)
{
	struct mt76_usb *usb = &dev->usb;
	int err;

	mutex_lock(&usb->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&usb->mcu.mutex);

	return err;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

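/*
 * Write a list of register/value pairs via CMD_RANDOM_WRITE, splitting
 * the list into chunks that fit a single in-band packet.
 */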
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_WRITE = 12;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	/* only wait for a response on the last chunk */
	ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

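/*
 * Read a list of registers via CMD_RANDOM_READ. The whole request must
 * fit in a single in-band packet; the response is decoded by
 * mt76x02u_multiple_mcu_reads() while waiting for the command to complete.
 */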
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&usb->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&usb->mcu.mutex);

	return ret;
}

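/* Ask the device to reset the MCU firmware via a vendor request. */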
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

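/*
 * Upload a single firmware chunk: prepend the DMA info header, program
 * the FCE DMA address/length registers, push the data out over the
 * in-band command endpoint and bump the FCE descriptor index.
 */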
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
			    const void *fw_data, int len, u32 dst_addr)
{
	u8 *data = sg_virt(&buf->urb->sg[0]);
	DECLARE_COMPLETION_ONSTACK(cmpl);
	__le32 info;
	u32 val;
	int err;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
	err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
			       MT_EP_OUT_INBAND_CMD,
			       buf, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &cmpl);
	if (err < 0)
		return err;

	if (!wait_for_completion_timeout(&cmpl,
					 msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "firmware upload timed out\n");
		usb_kill_urb(buf->urb);
		return -ETIMEDOUT;
	}

	if (mt76u_urb_error(buf->urb)) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
			buf->urb->status);
		return buf->urb->status;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

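/* Upload a firmware image in max_payload sized chunks. */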
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	/* reserve 8 bytes per chunk for the DMA info header and padding */
	int err, len, pos = 0, max_len = max_payload - 8;
	struct mt76u_buf buf;

	err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
			      GFP_KERNEL);
	if (err < 0)
		return err;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	mt76u_buf_free(&buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

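/* Hook up the USB MCU ops for mt76x02 devices. */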
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.mcu_msg_alloc = mt76x02u_mcu_msg_alloc,
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");