/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

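/* Prepare the target for BMI use: clear the "BMI done" flag and, when the
 * hardware provides an enable_pll_clk op, enable the PLL clock so the
 * subsequent firmware download completes sooner. A failure to enable the
 * PLL is only logged; it does not abort the BMI phase.
 */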
void ath10k_bmi_start(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;

	/* Enable hardware clock to speed up firmware download */
	if (ar->hw_params.hw_ops->enable_pll_clk) {
		ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
	}
}

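/* Send BMI_DONE to hand control from the bootloader over to the firmware.
 * The command is sent at most once per BMI session; subsequent calls are
 * no-ops.
 */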
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

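/* Query the target's version and type words via BMI_GET_TARGET_INFO. Only
 * valid before BMI_DONE has been sent; afterwards the command is refused
 * with -EBUSY. A short response is treated as an I/O error.
 */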
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

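/* Read an arbitrary length of target memory into @buffer. The transfer is
 * split into chunks of at most BMI_MAX_DATA_SIZE bytes, each issued as a
 * separate BMI_READ_MEMORY exchange. Disallowed once BMI_DONE has been sent.
 */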
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len  = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer  += rxlen;
		length  -= rxlen;
	}

	return 0;
}

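/* Write a single 32-bit value to a SoC register through
 * BMI_WRITE_SOC_REGISTER. Disallowed once BMI_DONE has been sent.
 */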
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

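/* Read a single 32-bit SoC register via BMI_READ_SOC_REGISTER and return the
 * CPU-endian value in @reg_val. Disallowed once BMI_DONE has been sent.
 */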
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

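/* Write @length bytes from @buffer to target memory at @address. Data is sent
 * in chunks that fit a BMI_WRITE_MEMORY command; each chunk's payload is
 * copied first and only then padded up to a 4-byte multiple on the wire, so
 * the caller's buffer is never read past @length. Disallowed after BMI_DONE.
 */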
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len  = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fix up the rounded-up txlen so length reaches zero on the
		 * last chunk
		 */
		txlen = min(txlen, length);

		address += txlen;
		buffer  += txlen;
		length  -= txlen;
	}

	return 0;
}

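/* Ask the target to execute code at @address with @param as its argument and
 * return the 32-bit result via @result. Disallowed after BMI_DONE; a short
 * response is treated as an I/O error.
 */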
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr  = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

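/* Stream LZ-compressed image data to the target in chunks that fit a
 * BMI_LZ_DATA command. Chunks are expected to be 4-byte aligned; an unaligned
 * length only triggers a one-time warning. Used together with
 * ath10k_bmi_lz_stream_start(), see ath10k_bmi_fast_download() below.
 */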
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

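/* Open a compressed (LZ) stream on the target at @address; data subsequently
 * sent with ath10k_bmi_lz_data() is decompressed there by the target.
 * Disallowed after BMI_DONE.
 */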
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

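/* Download a compressed image to the target at @address. The bulk of the
 * buffer is streamed in 4-byte multiples; any trailing 1-3 bytes are copied
 * into a zero-padded word and sent last. Finally the stream is reopened at
 * address 0, which mainly serves to flush the target's caches.
 */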
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}