xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/bmi.c (revision 74ce1896)
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

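/* Prepare the target for BMI command exchange: clear the "done" state and,
 * where the hardware supports it, enable the PLL clock to speed up the
 * firmware download.
 */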
void ath10k_bmi_start(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;

	/* Enable hardware clock to speed up firmware download */
	if (ar->hw_params.hw_ops->enable_pll_clk) {
		ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
	}
}

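/* Send BMI_DONE to end the BMI phase. Once done_sent is set, all other BMI
 * commands are rejected with -EBUSY.
 */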
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

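/* Query the target version and type. Must be issued before BMI_DONE. */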
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

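/* SDIO variant of the target info query: the response begins with a length
 * word which on some boards is preceded by a sentinel word, so it is read
 * in several steps.
 */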
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check whether it is
	 * the special sentinel version word or the first word of the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards send a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

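/* Read target memory at the given address, splitting the transfer into
 * chunks of at most BMI_MAX_DATA_SIZE bytes.
 */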
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}

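/* Write a 32-bit value to a target SoC register. */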
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

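/* Read a 32-bit value from a target SoC register. */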
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

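/* Write a buffer to target memory in chunks, padding each chunk's length up
 * to a multiple of 4 bytes.
 */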
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before the roundup to avoid reading past the buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* clamp txlen back so length reaches zero on the last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

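/* Ask the target to execute code at the given address with one parameter
 * and return the 32-bit result.
 */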
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

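/* Push a block of data into the currently open LZ download stream, split
 * into chunks limited by BMI_MAX_DATA_SIZE.
 */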
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

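/* Start an LZ download stream at the given target address for subsequent
 * BMI_LZ_DATA transfers.
 */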
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

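/* Download a buffer through the LZ stream interface: send the 4-byte aligned
 * head, then a zero-padded trailer for any leftover bytes, and finally
 * restart the stream at address 0 to flush the target caches.
 */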
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}