/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN			4

#define MT_FCE_DMA_ADDR			0x0230
#define MT_FCE_DMA_LEN			0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8

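/*
 * Allocate an skb carrying an MCU command payload: reserve MT_CMD_HDR_LEN
 * bytes of headroom for the DMA info header that is prepended later and
 * leave room for trailing zero padding.
 */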
static struct sk_buff *
mt76x02u_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_CMD_HDR_LEN);
	skb_put_data(skb, data, len);

	return skb;
}

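/*
 * Parse the payload of a register read response into the pending
 * usb->mcu.rp array: in burst mode the MCU returns only values for
 * consecutive registers starting at rp[0], otherwise it returns
 * (register, value) pairs that must match the requested registers.
 */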
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}

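/*
 * Wait for the MCU to acknowledge a command: poll the CMD_RESP completion
 * up to five times (300ms each), resubmit the response buffer and return 0
 * once an EVT_CMD_DONE event carrying the expected sequence number arrives.
 */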
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76u_buf *buf = &usb->mcu.res;
	struct urb *urb = buf->urb;
	int i, ret;
	u32 rxfce;
	u8 *data;

	for (i = 0; i < 5; i++) {
		if (!wait_for_completion_timeout(&usb->mcu.cmpl,
						 msecs_to_jiffies(300)))
			continue;

		if (urb->status)
			return -EIO;

		data = sg_virt(&urb->sg[0]);
		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4,
						    urb->actual_length - 8);

		rxfce = get_unaligned_le32(data);
		ret = mt76u_submit_buf(dev, USB_DIR_IN,
				       MT_EP_IN_CMD_RESP,
				       buf, GFP_KERNEL,
				       mt76u_mcu_complete_urb,
				       &usb->mcu.cmpl);
		if (ret)
			return ret;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}

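/*
 * Send a single MCU command: prepend the DMA info header carrying the
 * command type and sequence number, push the skb through the in-band
 * command bulk endpoint and optionally wait for the firmware response.
 * Callers must hold usb->mcu.mutex.
 */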
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;
	int ret, sent;
	u8 seq = 0;
	u32 info;

	if (test_bit(MT76_REMOVED, &dev->state))
		return 0;

	pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
	if (wait_resp) {
		seq = ++usb->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++usb->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
	if (ret)
		return ret;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

	consume_skb(skb);

	return ret;
}

static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
		      int len, bool wait_resp)
{
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int err;

	skb = mt76x02u_mcu_msg_alloc(data, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&usb->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&usb->mcu.mutex);

	return err;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

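/*
 * Write a list of (register, value) pairs through the CMD_RANDOM_WRITE
 * command, splitting the list into chunks that fit into a single in-band
 * packet; only the final chunk waits for a firmware response.
 */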
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_WRITE = 12;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&usb->mcu.mutex);
	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	mutex_unlock(&usb->mcu.mutex);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

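/*
 * Read back a list of registers through the CMD_RANDOM_READ command.
 * The request must fit into a single in-band packet; the response is
 * decoded into @data by mt76x02u_multiple_mcu_reads() while waiting
 * for the command to complete.
 */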
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&usb->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&usb->mcu.mutex);

	return ret;
}

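/* Reset the onboard MCU through the MT_VEND_DEV_MODE vendor request. */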
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

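/*
 * Upload a single firmware chunk: prepend the MCU message header, program
 * the FCE DMA destination address and length through vendor requests, send
 * the buffer on the in-band command endpoint and bump the FCE TX descriptor
 * index once the transfer completes.
 */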
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
			    const void *fw_data, int len, u32 dst_addr)
{
	u8 *data = sg_virt(&buf->urb->sg[0]);
	DECLARE_COMPLETION_ONSTACK(cmpl);
	__le32 info;
	u32 val;
	int err;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
	err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
			       MT_EP_OUT_INBAND_CMD,
			       buf, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &cmpl);
	if (err < 0)
		return err;

	if (!wait_for_completion_timeout(&cmpl,
					 msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "firmware upload timed out\n");
		usb_kill_urb(buf->urb);
		return -ETIMEDOUT;
	}

	if (mt76u_urb_error(buf->urb)) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
			buf->urb->status);
		return buf->urb->status;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

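/*
 * Split a firmware image into max_payload sized chunks (minus 8 bytes of
 * header and padding) and upload them at increasing offsets in the MCU
 * address space.
 */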
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int err, len, pos = 0, max_len = max_payload - 8;
	struct mt76u_buf buf;

	err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
			      GFP_KERNEL);
	if (err < 0)
		return err;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	mt76u_buf_free(&buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

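/* Hook up the USB specific MCU ops for MT76x02 devices. */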
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");