xref: /openbmc/linux/drivers/net/wireless/ath/ath6kl/bmi.c (revision 0c30295e)
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"

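/*
 * Wait for the target to post a BMI command credit by polling the mailbox
 * counter-decrement register; gives up after BMI_COMMUNICATION_TIMEOUT ms.
 */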
static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* Read the counter register to get the command credits */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
		/*
		 * Hit the credit counter with a 4-byte access: the first byte
		 * read hits the counter and causes a decrement, while the
		 * remaining 3 bytes have no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = hif_read_write_sync(ar, addr,
					  (u8 *)&ar->bmi.cmd_credits, 4,
					  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

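/*
 * Wait until the RX lookahead valid bit for the BMI mailbox is set,
 * i.e. until the target has placed response data in the mailbox FIFO.
 */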
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !rx_word) {
		ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
					  (u8 *)&rx_word, sizeof(rx_word),
					  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

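/*
 * Acquire a BMI command credit, then write the command buffer to the
 * BMI mailbox.
 */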
static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	ret = ath6kl_get_bmi_cmd_credits(ar);
	if (ret)
		return ret;

	addr = ar->mbox_info.htc_addr;

	ret = hif_read_write_sync(ar, addr, buf, len,
				  HIF_WR_SYNC_BYTE_INC);
	if (ret)
		ath6kl_err("unable to send the bmi data to the device\n");

	return ret;
}

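/*
 * Read a BMI response from the mailbox. See the synchronization notes
 * below for why we wait for lookahead data before posting the HIF read.
 */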
static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers.  And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data.  So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read.  Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 *  CASE 1: length < 4
	 *        Should not happen
	 *
	 *  CASE 2: 4 <= length <= 128
	 *        Wait for first 4 bytes to be in FIFO
	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *        a BMI command credit, which indicates that the ENTIRE
	 *        response is available in the FIFO
	 *
	 *  CASE 3: length > 128
	 *        Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	if (len >= 4) { /* NB: Currently, always true */
		ret = ath6kl_bmi_get_rx_lkahd(ar);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = hif_read_write_sync(ar, addr, buf, len,
				  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

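/* Tell the target that the BMI phase is over; sent at most once. */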
int ath6kl_bmi_done(struct ath6kl *ar)
{
	int ret;
	u32 cid = BMI_DONE;

	if (ar->bmi.done_sent) {
		ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;

	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send bmi done: %d\n", ret);
		return ret;
	}

	return 0;
}

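/*
 * Query the target's version and type. Targets reporting the sentinel
 * version also return a byte count followed by the extended targ_info.
 */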
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
			       struct ath6kl_bmi_target_info *targ_info)
{
	int ret;
	u32 cid = BMI_GET_TARGET_INFO;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send get target info: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
				  sizeof(targ_info->version));
	if (ret) {
		ath6kl_err("Unable to recv target info: %d\n", ret);
		return ret;
	}

	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
		/* Determine how many bytes are in the Target's targ_info */
		ret = ath6kl_bmi_recv_buf(ar,
				   (u8 *)&targ_info->byte_count,
				   sizeof(targ_info->byte_count));
		if (ret) {
			ath6kl_err("unable to read target info byte count: %d\n",
				   ret);
			return ret;
		}

		/*
		 * If the target's targ_info size doesn't match the host's,
		 * backwards-compatibility handling would be needed; for now
		 * treat a mismatch as an error.
		 */
		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* Read the remainder of the targ_info */
		ret = ath6kl_bmi_recv_buf(ar,
				   ((u8 *)targ_info) +
				   sizeof(targ_info->byte_count),
				   sizeof(*targ_info) -
				   sizeof(targ_info->byte_count));

		if (ret) {
			ath6kl_err("Unable to read target info (%d bytes): %d\n",
				   targ_info->byte_count, ret);
			return ret;
		}
	}

	ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
		targ_info->version, targ_info->type);

	return 0;
}

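/*
 * Read 'len' bytes of target memory at 'addr' into 'buf', in chunks of
 * at most BMI_DATASZ_MAX bytes.
 */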
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_READ_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, rx_len;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi read memory: device: addr: 0x%x, len: %d\n",
		   addr, len);

	len_remain = len;

	while (len_remain) {
		rx_len = (len_remain < BMI_DATASZ_MAX) ?
					len_remain : BMI_DATASZ_MAX;
		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
		offset += sizeof(rx_len);

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
		if (ret) {
			ath6kl_err("Unable to read from the device: %d\n",
				   ret);
			return ret;
		}
		memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
		len_remain -= rx_len;
		addr += rx_len;
	}

	return 0;
}

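/*
 * Write 'len' bytes from 'buf' to target memory at 'addr'. Data is sent
 * in chunks; an unaligned tail is padded up to a 4-byte boundary.
 */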
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_WRITE_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
	u8 aligned_buf[BMI_DATASZ_MAX];
	u8 *src;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		  "bmi write memory: addr: 0x%x, len: %d\n", addr, len);

	len_remain = len;
	while (len_remain) {
		src = &buf[len - len_remain];

		if (len_remain < (BMI_DATASZ_MAX - header)) {
			if (len_remain & 3) {
				u32 pad = 4 - (len_remain & 3);

				/* round up to a 4-byte boundary, zero padded,
				 * without reading past the end of buf
				 */
				memcpy(aligned_buf, src, len_remain);
				memset(aligned_buf + len_remain, 0, pad);
				len_remain += pad;
				src = aligned_buf;
			}
			tx_len = len_remain;
		} else {
			tx_len = (BMI_DATASZ_MAX - header);
		}

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
		offset += tx_len;

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		len_remain -= tx_len;
		addr += tx_len;
	}

	return 0;
}

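/*
 * Ask the target to execute code at 'addr'. '*param' carries the
 * argument in and the target's return value out.
 */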
int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_EXECUTE;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
		   addr, *param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
	offset += sizeof(*param);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}

	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}

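/* Set the application start address the target jumps to once BMI is done. */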
int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_SET_APP_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

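/* Read the 32-bit SOC register at 'addr' into '*param'. */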
int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_READ_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}
	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}

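/* Write the 32-bit value 'param' to the SOC register at 'addr'. */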
int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
{
	u32 cid = BMI_WRITE_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(param);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write SOC reg: addr: 0x%x, param: %d\n",
		   addr, param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
	offset += sizeof(param);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

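/*
 * Stream 'len' bytes of LZ-compressed data to the target in chunks that
 * fit the BMI command buffer; a stream must already have been opened
 * with ath6kl_bmi_lz_stream_start().
 */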
int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
{
	u32 cid = BMI_LZ_DATA;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(len);
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = BMI_DATASZ_MAX + header;
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n", len);

	len_remain = len;
	while (len_remain) {
		tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
			  len_remain : (BMI_DATASZ_MAX - header);

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
			tx_len);
		offset += tx_len;

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}

		len_remain -= tx_len;
	}

	return 0;
}

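/*
 * Open an LZ decompression stream whose output is written to target
 * address 'addr'.
 */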
int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_LZ_STREAM_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi LZ stream start: addr: 0x%x\n",
		   addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to start LZ stream to the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

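/*
 * Download 'buf' to target address 'addr' over the LZ stream interface:
 * the 4-byte aligned portion is sent first, then any unaligned tail as a
 * zero-padded final word.
 */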
int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	int ret;
	u32 last_word = 0;
	u32 last_word_offset = len & ~0x3;
	u32 unaligned_bytes = len & 0x3;

	ret = ath6kl_bmi_lz_stream_start(ar, addr);
	if (ret)
		return ret;

	if (unaligned_bytes) {
		/* copy the last word into a zero padded buffer */
		memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
	}

	ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
	if (ret)
		return ret;

	if (unaligned_bytes)
		ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);

	if (!ret) {
		/* Close compressed stream and open a new (fake) one.
		 * This serves mainly to flush Target caches.
		 */
		ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
	}
	return ret;
}

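/* Allocate the command buffer shared by all BMI operations. */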
int ath6kl_bmi_init(struct ath6kl *ar)
{
	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
	if (!ar->bmi.cmd_buf)
		return -ENOMEM;

	return 0;
}

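/* Free the command buffer allocated by ath6kl_bmi_init(). */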
void ath6kl_bmi_cleanup(struct ath6kl *ar)
{
	kfree(ar->bmi.cmd_buf);
	ar->bmi.cmd_buf = NULL;
}