1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4  */
5 
6 #include <linux/module.h>
7 #include <linux/firmware.h>
8 
9 #include "mt76x02.h"
10 #include "mt76x02_mcu.h"
11 #include "mt76x02_usb.h"
12 
13 #define MT_CMD_HDR_LEN			4
14 
15 #define MT_FCE_DMA_ADDR			0x0230
16 #define MT_FCE_DMA_LEN			0x0234
17 
18 #define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8
19 
/*
 * Parse an MCU read-response payload into the pending register-pair
 * list (usb->mcu.rp).
 *
 * Two response layouts exist:
 *  - burst: @data is a plain array of LE32 values for consecutive
 *    registers starting at usb->mcu.rp[0].reg; the reg fields are
 *    rewritten with sequential offsets relative to usb->mcu.base.
 *  - non-burst: @data is a sequence of LE32 <reg, value> pairs; each
 *    returned reg is checked against the request list and only the
 *    value is stored.
 *
 * Caller must hold dev->mcu.mutex: usb->mcu.rp/rp_len/base/burst are
 * shared state published by mt76x02u_mcu_rd_rp() under that lock.
 */
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		/* burst replies carry 4 bytes (value only) per register */
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		/* registers are consecutive, relative to the MCU base */
		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		/* non-burst replies carry 8 bytes (reg + value) per entry */
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			/* replies are expected in request order */
			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}
49 
/*
 * Wait for the MCU to acknowledge command @seq.
 *
 * Polls the IN command/response endpoint: up to 5 bulk reads of 300 ms
 * each, retrying on timeout.  Each response starts with an RX FCE info
 * word carrying the event type and the echoed sequence number; success
 * means EVT_CMD_DONE with a matching sequence.
 *
 * If a register read is pending (usb->mcu.rp was set by
 * mt76x02u_mcu_rd_rp()), the payload is handed to
 * mt76x02u_multiple_mcu_reads() first.
 *
 * Returns 0 on a matching CMD_DONE event, a negative errno otherwise
 * (-ETIMEDOUT if all reads timed out).
 */
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	u8 *data = usb->mcu.data;
	int i, len, ret;
	u32 rxfce;

	for (i = 0; i < 5; i++) {
		ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
				     300, MT_EP_IN_CMD_RESP);
		if (ret == -ETIMEDOUT)
			continue;
		if (ret)
			goto out;

		/* skip the 4-byte FCE header; len - 8 presumably also
		 * drops a 4-byte trailer -- TODO confirm frame layout
		 */
		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);

		rxfce = get_unaligned_le32(data);
		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		/* unexpected event or stale sequence number: keep polling */
		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}
out:
	dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
	return ret;
}
81 
/*
 * Send an MCU command skb over the inband command endpoint and
 * optionally wait for the matching completion event.
 *
 * Caller must hold dev->mcu.mutex and passes ownership of @skb; it is
 * consumed on every path except the mt76x02u_skb_dma_info() failure
 * path (see note below).
 *
 * Returns 0 on success (or silently if the device was removed),
 * negative errno on failure.
 */
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	u8 seq = 0;
	u32 info;
	int ret;

	/* device was unplugged: drop the command without error */
	if (test_bit(MT76_REMOVED, &dev->phy.state)) {
		ret = 0;
		goto out;
	}

	if (wait_resp) {
		/* 4-bit sequence counter; 0 means "no response expected",
		 * so skip it when wrapping
		 */
		seq = ++dev->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		/* NOTE(review): no consume_skb() on this path -- presumably
		 * mt76x02u_skb_dma_info() frees the skb itself on failure
		 * (skb_pad() semantics); confirm, otherwise this leaks.
		 */
		return ret;

	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
			     MT_EP_OUT_INBAND_CMD);
	if (ret)
		goto out;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

out:
	consume_skb(skb);

	return ret;
}
121 
122 static int
123 mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
124 		      int len, bool wait_resp)
125 {
126 	struct sk_buff *skb;
127 	int err;
128 
129 	skb = mt76_mcu_msg_alloc(dev, data, len);
130 	if (!skb)
131 		return -ENOMEM;
132 
133 	mutex_lock(&dev->mcu.mutex);
134 	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
135 	mutex_unlock(&dev->mcu.mutex);
136 
137 	return err;
138 }
139 
140 static inline void skb_put_le32(struct sk_buff *skb, u32 val)
141 {
142 	put_unaligned_le32(val, skb_put(skb, 4));
143 }
144 
145 static int
146 mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
147 		   const struct mt76_reg_pair *data, int n)
148 {
149 	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
150 	const int CMD_RANDOM_WRITE = 12;
151 	struct sk_buff *skb;
152 	int cnt, i, ret;
153 
154 	if (!n)
155 		return 0;
156 
157 	cnt = min(max_vals_per_cmd, n);
158 
159 	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
160 	if (!skb)
161 		return -ENOMEM;
162 	skb_reserve(skb, MT_DMA_HDR_LEN);
163 
164 	for (i = 0; i < cnt; i++) {
165 		skb_put_le32(skb, base + data[i].reg);
166 		skb_put_le32(skb, data[i].value);
167 	}
168 
169 	mutex_lock(&dev->mcu.mutex);
170 	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
171 	mutex_unlock(&dev->mcu.mutex);
172 	if (ret)
173 		return ret;
174 
175 	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
176 }
177 
/*
 * Read @n registers (relative to @base) via a CMD_RANDOM_READ MCU
 * command.
 *
 * The request payload is a list of <reg, value> LE32 pairs (the value
 * slot is sent as a placeholder).  The reply is consumed inside
 * mt76x02u_mcu_wait_resp() by mt76x02u_multiple_mcu_reads() through
 * the usb->mcu.rp pointer published below, which is why the whole
 * publish/send/clear sequence runs under dev->mcu.mutex.
 *
 * Unlike the write path, reads are not chunked: a request larger than
 * one inband packet is rejected with -EINVAL.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	/* 8 bytes per pair + DMA header + 4-byte tail room */
	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);

	/* publish the destination list for the response parser */
	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&dev->mcu.mutex);

	return ret;
}
220 
221 void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
222 {
223 	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
224 			     USB_DIR_OUT | USB_TYPE_VENDOR,
225 			     0x1, 0, NULL, 0);
226 }
227 EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
228 
/*
 * Upload one firmware chunk to the device.
 *
 * @data:     scratch buffer, reused across chunks by the caller
 * @fw_data:  chunk payload
 * @len:      chunk payload length in bytes
 * @dst_addr: destination offset in device memory
 *
 * The chunk is framed as <LE32 DMA info><payload><4 zero bytes>.  The
 * FCE DMA address/length registers are programmed, the frame is sent
 * over the inband command endpoint, and the FCE CPU descriptor index
 * is incremented to signal chunk completion to the device.
 *
 * NOTE(review): the 4-byte memset covers [len, len + 4) *before* @len
 * is rounded up to 4; a non-4-aligned @len would send up to 3
 * uninitialized bytes.  Callers appear to use 4-aligned chunks --
 * confirm.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
			    const void *fw_data, int len, u32 dst_addr)
{
	__le32 info;
	u32 val;
	int err, data_len;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	/* program destination address and (rounded) length into the FCE */
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	data_len = MT_CMD_HDR_LEN + len + sizeof(info);

	err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
			     MT_EP_OUT_INBAND_CMD);
	if (err) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
		return err;
	}

	/* bump the FCE descriptor index to mark the chunk complete */
	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}
266 
267 int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
268 			      int data_len, u32 max_payload, u32 offset)
269 {
270 	int len, err = 0, pos = 0, max_len = max_payload - 8;
271 	u8 *buf;
272 
273 	buf = kmalloc(max_payload, GFP_KERNEL);
274 	if (!buf)
275 		return -ENOMEM;
276 
277 	while (data_len > 0) {
278 		len = min_t(int, data_len, max_len);
279 		err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
280 						  len, offset + pos);
281 		if (err < 0)
282 			break;
283 
284 		data_len -= len;
285 		pos += len;
286 		usleep_range(5000, 10000);
287 	}
288 	kfree(buf);
289 
290 	return err;
291 }
292 EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
293 
294 void mt76x02u_init_mcu(struct mt76_dev *dev)
295 {
296 	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
297 		.headroom = MT_CMD_HDR_LEN,
298 		.tailroom = 8,
299 		.mcu_send_msg = mt76x02u_mcu_send_msg,
300 		.mcu_parse_response = mt76x02_mcu_parse_response,
301 		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
302 		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
303 	};
304 
305 	dev->mcu_ops = &mt76x02u_mcu_ops;
306 }
307 EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
308 
309 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
310 MODULE_LICENSE("Dual BSD/GPL");
311