xref: /openbmc/linux/drivers/net/wireless/ath/ath6kl/bmi.c (revision bdcd8170)
1 /*
2  * Copyright (c) 2004-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "core.h"
18 #include "hif-ops.h"
19 #include "target.h"
20 #include "debug.h"
21 
22 static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
23 {
24 	u32 addr;
25 	unsigned long timeout;
26 	int ret;
27 
28 	ar->bmi.cmd_credits = 0;
29 
30 	/* Read the counter register to get the command credits */
31 	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
32 
33 	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
34 	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
35 
36 		/*
37 		 * Hit the credit counter with a 4-byte access; the first byte
38 		 * read will hit the counter and cause a decrement, while the
39 		 * remaining 3 bytes have no effect. The rationale behind this
40 		 * is to make all HIF accesses 4-byte aligned.
41 		 */
42 		ret = hif_read_write_sync(ar, addr,
43 					 (u8 *)&ar->bmi.cmd_credits, 4,
44 					 HIF_RD_SYNC_BYTE_INC);
45 		if (ret) {
46 			ath6kl_err("Unable to decrement the command credit count register: %d\n",
47 				   ret);
48 			return ret;
49 		}
50 
51 		/* The counter is only 8 bits.
52 		 * Ignore anything in the upper 3 bytes
53 		 */
54 		ar->bmi.cmd_credits &= 0xFF;
55 	}
56 
57 	if (!ar->bmi.cmd_credits) {
58 		ath6kl_err("bmi communication timeout\n");
59 		return -ETIMEDOUT;
60 	}
61 
62 	return 0;
63 }
64 
65 static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar, bool need_timeout)
66 {
67 	unsigned long timeout;
68 	u32 rx_word = 0;
69 	int ret = 0;
70 
71 	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
72 	while ((!need_timeout || time_before(jiffies, timeout)) && !rx_word) {
73 		ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
74 					  (u8 *)&rx_word, sizeof(rx_word),
75 					  HIF_RD_SYNC_BYTE_INC);
76 		if (ret) {
77 			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
78 			return ret;
79 		}
80 
81 		 /* all we really want is one bit */
82 		rx_word &= (1 << ENDPOINT1);
83 	}
84 
85 	if (!rx_word) {
86 		ath6kl_err("bmi_recv_buf FIFO empty\n");
87 		return -EINVAL;
88 	}
89 
90 	return ret;
91 }
92 
93 static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
94 {
95 	int ret;
96 	u32 addr;
97 
98 	ret = ath6kl_get_bmi_cmd_credits(ar);
99 	if (ret)
100 		return ret;
101 
102 	addr = ar->mbox_info.htc_addr;
103 
104 	ret = hif_read_write_sync(ar, addr, buf, len,
105 				  HIF_WR_SYNC_BYTE_INC);
106 	if (ret)
107 		ath6kl_err("unable to send the bmi data to the device\n");
108 
109 	return ret;
110 }
111 
112 static int ath6kl_bmi_recv_buf(struct ath6kl *ar,
113 			u8 *buf, u32 len, bool want_timeout)
114 {
115 	int ret;
116 	u32 addr;
117 
118 	/*
119 	 * During normal bootup, small reads may be required.
120 	 * Rather than issue an HIF Read and then wait as the Target
121 	 * adds successive bytes to the FIFO, we wait here until
122 	 * we know that response data is available.
123 	 *
124 	 * This allows us to cleanly timeout on an unexpected
125 	 * Target failure rather than risk problems at the HIF level.
126 	 * In particular, this avoids SDIO timeouts and possibly garbage
127 	 * data on some host controllers.  And on an interconnect
128 	 * such as Compact Flash (as well as some SDIO masters) which
129 	 * does not provide any indication on data timeout, it avoids
130 	 * does not provide any indication of a data timeout, it avoids
131 	 *
132 	 * Synchronization is more difficult for reads larger than the
133 	 * size of the MBOX FIFO (128B), because the Target is unable
134 	 * to push the 129th byte of data until AFTER the Host posts an
135 	 * HIF Read and removes some FIFO data.  So for large reads the
136 	 * Host proceeds to post an HIF Read BEFORE all the data is
137 	 * actually available to read.  Fortunately, large BMI reads do
138 	 * not occur in practice -- they're supported for debug/development.
139 	 *
140 	 * So Host/Target BMI synchronization is divided into these cases:
141 	 *  CASE 1: length < 4
142 	 *        Should not happen
143 	 *
144 	 *  CASE 2: 4 <= length <= 128
145 	 *        Wait for first 4 bytes to be in FIFO
146 	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
147 	 *        a BMI command credit, which indicates that the ENTIRE
148 	 *        response is available in the FIFO
149 	 *
150 	 *  CASE 3: length > 128
151 	 *        Wait for the first 4 bytes to be in FIFO
152 	 *
153 	 * For most uses, a small timeout should be sufficient and we will
154 	 * usually see a response quickly; but there may be some unusual
155 	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
156 	 * For now, we use an unbounded busy loop while waiting for
157 	 * BMI_EXECUTE.
158 	 *
159 	 * If BMI_EXECUTE ever needs to support longer-latency execution,
160 	 * especially in production, this code needs to be enhanced to sleep
161 	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
162 	 * a function of Host processor speed.
163 	 */
164 	if (len >= 4) { /* NB: Currently, always true */
165 		ret = ath6kl_bmi_get_rx_lkahd(ar, want_timeout);
166 		if (ret)
167 			return ret;
168 	}
169 
170 	addr = ar->mbox_info.htc_addr;
171 	ret = hif_read_write_sync(ar, addr, buf, len,
172 				  HIF_RD_SYNC_BYTE_INC);
173 	if (ret) {
174 		ath6kl_err("Unable to read the bmi data from the device: %d\n",
175 			   ret);
176 		return ret;
177 	}
178 
179 	return 0;
180 }
181 
182 int ath6kl_bmi_done(struct ath6kl *ar)
183 {
184 	int ret;
185 	u32 cid = BMI_DONE;
186 
187 	if (ar->bmi.done_sent) {
188 		ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
189 		return 0;
190 	}
191 
192 	ar->bmi.done_sent = true;
193 
194 	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
195 	if (ret) {
196 		ath6kl_err("Unable to send bmi done: %d\n", ret);
197 		return ret;
198 	}
199 
200 	ath6kl_bmi_cleanup(ar);
201 
202 	return 0;
203 }
204 
205 int ath6kl_bmi_get_target_info(struct ath6kl *ar,
206 			       struct ath6kl_bmi_target_info *targ_info)
207 {
208 	int ret;
209 	u32 cid = BMI_GET_TARGET_INFO;
210 
211 	if (ar->bmi.done_sent) {
212 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
213 		return -EACCES;
214 	}
215 
216 	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
217 	if (ret) {
218 		ath6kl_err("Unable to send get target info: %d\n", ret);
219 		return ret;
220 	}
221 
222 	ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
223 			sizeof(targ_info->version), true);
224 	if (ret) {
225 		ath6kl_err("Unable to recv target info: %d\n", ret);
226 		return ret;
227 	}
228 
229 	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
230 		/* Determine how many bytes are in the Target's targ_info */
231 		ret = ath6kl_bmi_recv_buf(ar,
232 				   (u8 *)&targ_info->byte_count,
233 				   sizeof(targ_info->byte_count),
234 				   true);
235 		if (ret) {
236 			ath6kl_err("unable to read target info byte count: %d\n",
237 				   ret);
238 			return ret;
239 		}
240 
241 		/*
242 		 * The target's targ_info size doesn't match the host's.
243 		 * No backwards compat is handled, so treat it as an error.
244 		 */
245 		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
246 			WARN_ON(1);
247 			return -EINVAL;
248 		}
249 
250 		/* Read the remainder of the targ_info */
251 		ret = ath6kl_bmi_recv_buf(ar,
252 				   ((u8 *)targ_info) +
253 				   sizeof(targ_info->byte_count),
254 				   sizeof(*targ_info) -
255 				   sizeof(targ_info->byte_count),
256 				   true);
257 
258 		if (ret) {
259 			ath6kl_err("Unable to read target info (%d bytes): %d\n",
260 				   targ_info->byte_count, ret);
261 			return ret;
262 		}
263 	}
264 
265 	ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
266 		targ_info->version, targ_info->type);
267 
268 	return 0;
269 }
270 
271 int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
272 {
273 	u32 cid = BMI_READ_MEMORY;
274 	int ret;
275 	u32 offset;
276 	u32 len_remain, rx_len;
277 	u16 size;
278 
279 	if (ar->bmi.done_sent) {
280 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
281 		return -EACCES;
282 	}
283 
284 	size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
285 	if (size > MAX_BMI_CMDBUF_SZ) {
286 		WARN_ON(1);
287 		return -EINVAL;
288 	}
289 	memset(ar->bmi.cmd_buf, 0, size);
290 
291 	ath6kl_dbg(ATH6KL_DBG_BMI,
292 		   "bmi read memory: device: addr: 0x%x, len: %d\n",
293 		   addr, len);
294 
295 	len_remain = len;
296 
297 	while (len_remain) {
298 		rx_len = (len_remain < BMI_DATASZ_MAX) ?
299 					len_remain : BMI_DATASZ_MAX;
300 		offset = 0;
301 		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
302 		offset += sizeof(cid);
303 		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
304 		offset += sizeof(addr);
305 		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
306 		offset += sizeof(rx_len);
307 
308 		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
309 		if (ret) {
310 			ath6kl_err("Unable to write to the device: %d\n",
311 				   ret);
312 			return ret;
313 		}
314 		ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len, true);
315 		if (ret) {
316 			ath6kl_err("Unable to read from the device: %d\n",
317 				   ret);
318 			return ret;
319 		}
320 		memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
321 		len_remain -= rx_len; addr += rx_len;
322 	}
323 
324 	return 0;
325 }
326 
327 int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
328 {
329 	u32 cid = BMI_WRITE_MEMORY;
330 	int ret;
331 	u32 offset;
332 	u32 len_remain, tx_len;
333 	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
334 	u8 aligned_buf[BMI_DATASZ_MAX];
335 	u8 *src;
336 
337 	if (ar->bmi.done_sent) {
338 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
339 		return -EACCES;
340 	}
341 
342 	if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
343 		WARN_ON(1);
344 		return -EINVAL;
345 	}
346 
347 	memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
348 
349 	ath6kl_dbg(ATH6KL_DBG_BMI,
350 		  "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
351 
352 	len_remain = len;
353 	while (len_remain) {
354 		src = &buf[len - len_remain];
355 
356 		if (len_remain < (BMI_DATASZ_MAX - header)) {
357 			if (len_remain & 3) {
358 				/* align it with 4 bytes */
359 				len_remain = len_remain +
360 					     (4 - (len_remain & 3));
361 				memcpy(aligned_buf, src, len_remain);
362 				src = aligned_buf;
363 			}
364 			tx_len = len_remain;
365 		} else {
366 			tx_len = (BMI_DATASZ_MAX - header);
367 		}
368 
369 		offset = 0;
370 		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
371 		offset += sizeof(cid);
372 		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
373 		offset += sizeof(addr);
374 		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
375 		offset += sizeof(tx_len);
376 		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
377 		offset += tx_len;
378 
379 		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
380 		if (ret) {
381 			ath6kl_err("Unable to write to the device: %d\n",
382 				   ret);
383 			return ret;
384 		}
385 		len_remain -= tx_len; addr += tx_len;
386 	}
387 
388 	return 0;
389 }
390 
391 int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
392 {
393 	u32 cid = BMI_EXECUTE;
394 	int ret;
395 	u32 offset;
396 	u16 size;
397 
398 	if (ar->bmi.done_sent) {
399 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
400 		return -EACCES;
401 	}
402 
403 	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
404 	if (size > MAX_BMI_CMDBUF_SZ) {
405 		WARN_ON(1);
406 		return -EINVAL;
407 	}
408 	memset(ar->bmi.cmd_buf, 0, size);
409 
410 	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
411 		   addr, *param);
412 
413 	offset = 0;
414 	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
415 	offset += sizeof(cid);
416 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
417 	offset += sizeof(addr);
418 	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
419 	offset += sizeof(*param);
420 
421 	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
422 	if (ret) {
423 		ath6kl_err("Unable to write to the device: %d\n", ret);
424 		return ret;
425 	}
426 
427 	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), false);
428 	if (ret) {
429 		ath6kl_err("Unable to read from the device: %d\n", ret);
430 		return ret;
431 	}
432 
433 	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
434 
435 	return 0;
436 }
437 
438 int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
439 {
440 	u32 cid = BMI_SET_APP_START;
441 	int ret;
442 	u32 offset;
443 	u16 size;
444 
445 	if (ar->bmi.done_sent) {
446 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
447 		return -EACCES;
448 	}
449 
450 	size = sizeof(cid) + sizeof(addr);
451 	if (size > MAX_BMI_CMDBUF_SZ) {
452 		WARN_ON(1);
453 		return -EINVAL;
454 	}
455 	memset(ar->bmi.cmd_buf, 0, size);
456 
457 	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
458 
459 	offset = 0;
460 	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
461 	offset += sizeof(cid);
462 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
463 	offset += sizeof(addr);
464 
465 	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
466 	if (ret) {
467 		ath6kl_err("Unable to write to the device: %d\n", ret);
468 		return ret;
469 	}
470 
471 	return 0;
472 }
473 
474 int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
475 {
476 	u32 cid = BMI_READ_SOC_REGISTER;
477 	int ret;
478 	u32 offset;
479 	u16 size;
480 
481 	if (ar->bmi.done_sent) {
482 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
483 		return -EACCES;
484 	}
485 
486 	size = sizeof(cid) + sizeof(addr);
487 	if (size > MAX_BMI_CMDBUF_SZ) {
488 		WARN_ON(1);
489 		return -EINVAL;
490 	}
491 	memset(ar->bmi.cmd_buf, 0, size);
492 
493 	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
494 
495 	offset = 0;
496 	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
497 	offset += sizeof(cid);
498 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
499 	offset += sizeof(addr);
500 
501 	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
502 	if (ret) {
503 		ath6kl_err("Unable to write to the device: %d\n", ret);
504 		return ret;
505 	}
506 
507 	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), true);
508 	if (ret) {
509 		ath6kl_err("Unable to read from the device: %d\n", ret);
510 		return ret;
511 	}
512 	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
513 
514 	return 0;
515 }
516 
517 int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
518 {
519 	u32 cid = BMI_WRITE_SOC_REGISTER;
520 	int ret;
521 	u32 offset;
522 	u16 size;
523 
524 	if (ar->bmi.done_sent) {
525 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
526 		return -EACCES;
527 	}
528 
529 	size = sizeof(cid) + sizeof(addr) + sizeof(param);
530 	if (size > MAX_BMI_CMDBUF_SZ) {
531 		WARN_ON(1);
532 		return -EINVAL;
533 	}
534 	memset(ar->bmi.cmd_buf, 0, size);
535 
536 	ath6kl_dbg(ATH6KL_DBG_BMI,
537 		   "bmi write SOC reg: addr: 0x%x, param: %d\n",
538 		    addr, param);
539 
540 	offset = 0;
541 	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
542 	offset += sizeof(cid);
543 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
544 	offset += sizeof(addr);
545 	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
546 	offset += sizeof(param);
547 
548 	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
549 	if (ret) {
550 		ath6kl_err("Unable to write to the device: %d\n", ret);
551 		return ret;
552 	}
553 
554 	return 0;
555 }
556 
557 int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
558 {
559 	u32 cid = BMI_LZ_DATA;
560 	int ret;
561 	u32 offset;
562 	u32 len_remain, tx_len;
563 	const u32 header = sizeof(cid) + sizeof(len);
564 	u16 size;
565 
566 	if (ar->bmi.done_sent) {
567 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
568 		return -EACCES;
569 	}
570 
571 	size = BMI_DATASZ_MAX + header;
572 	if (size > MAX_BMI_CMDBUF_SZ) {
573 		WARN_ON(1);
574 		return -EINVAL;
575 	}
576 	memset(ar->bmi.cmd_buf, 0, size);
577 
578 	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n",
579 		   len);
580 
581 	len_remain = len;
582 	while (len_remain) {
583 		tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
584 			  len_remain : (BMI_DATASZ_MAX - header);
585 
586 		offset = 0;
587 		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
588 		offset += sizeof(cid);
589 		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
590 		offset += sizeof(tx_len);
591 		memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
592 			tx_len);
593 		offset += tx_len;
594 
595 		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
596 		if (ret) {
597 			ath6kl_err("Unable to write to the device: %d\n",
598 				   ret);
599 			return ret;
600 		}
601 
602 		len_remain -= tx_len;
603 	}
604 
605 	return 0;
606 }
607 
608 int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
609 {
610 	u32 cid = BMI_LZ_STREAM_START;
611 	int ret;
612 	u32 offset;
613 	u16 size;
614 
615 	if (ar->bmi.done_sent) {
616 		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
617 		return -EACCES;
618 	}
619 
620 	size = sizeof(cid) + sizeof(addr);
621 	if (size > MAX_BMI_CMDBUF_SZ) {
622 		WARN_ON(1);
623 		return -EINVAL;
624 	}
625 	memset(ar->bmi.cmd_buf, 0, size);
626 
627 	ath6kl_dbg(ATH6KL_DBG_BMI,
628 		   "bmi LZ stream start: addr: 0x%x\n",
629 		    addr);
630 
631 	offset = 0;
632 	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
633 	offset += sizeof(cid);
634 	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
635 	offset += sizeof(addr);
636 
637 	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
638 	if (ret) {
639 		ath6kl_err("Unable to start LZ stream to the device: %d\n",
640 			   ret);
641 		return ret;
642 	}
643 
644 	return 0;
645 }
646 
647 int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
648 {
649 	int ret;
650 	u32 last_word = 0;
651 	u32 last_word_offset = len & ~0x3;
652 	u32 unaligned_bytes = len & 0x3;
653 
654 	ret = ath6kl_bmi_lz_stream_start(ar, addr);
655 	if (ret)
656 		return ret;
657 
658 	if (unaligned_bytes) {
659 		/* copy the last word into a zero padded buffer */
660 		memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
661 	}
662 
663 	ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
664 	if (ret)
665 		return ret;
666 
667 	if (unaligned_bytes)
668 		ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
669 
670 	if (!ret) {
671 		/* Close compressed stream and open a new (fake) one.
672 		 * This serves mainly to flush Target caches. */
673 		ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
674 	}
675 	return ret;
676 }
677 
678 int ath6kl_bmi_init(struct ath6kl *ar)
679 {
680 	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
681 
682 	if (!ar->bmi.cmd_buf)
683 		return -ENOMEM;
684 
685 	return 0;
686 }
687 
688 void ath6kl_bmi_cleanup(struct ath6kl *ar)
689 {
690 	kfree(ar->bmi.cmd_buf);
691 	ar->bmi.cmd_buf = NULL;
692 }
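
For orientation, the sketch below shows one plausible ordering of the BMI helpers defined above during firmware bootstrap. It is an editorial illustration, not code from bmi.c: the wrapper example_bmi_bootstrap() and its fw, fw_len, fw_load_addr and app_start_addr parameters are assumed names, and the driver headers (core.h, bmi.h) are assumed to be included; only the ath6kl_bmi_* calls and their signatures come from the listing above.

/* Illustrative sketch only -- not part of bmi.c */
static int example_bmi_bootstrap(struct ath6kl *ar, u32 app_start_addr,
				 u8 *fw, u32 fw_len, u32 fw_load_addr)
{
	struct ath6kl_bmi_target_info targ_info;
	int ret;

	/* allocate ar->bmi.cmd_buf before any BMI command is issued */
	ret = ath6kl_bmi_init(ar);
	if (ret)
		return ret;

	/* query target version/type; must happen before BMI_DONE */
	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
	if (ret)
		goto err;

	/* push the firmware image; a compressed image would instead go
	 * through ath6kl_bmi_fast_download() */
	ret = ath6kl_bmi_write(ar, fw_load_addr, fw, fw_len);
	if (ret)
		goto err;

	/* record the address the target jumps to after BMI_DONE */
	ret = ath6kl_bmi_set_app_start(ar, app_start_addr);
	if (ret)
		goto err;

	/* BMI_DONE hands control to the target; it also calls
	 * ath6kl_bmi_cleanup() to free the command buffer */
	return ath6kl_bmi_done(ar);

err:
	ath6kl_bmi_cleanup(ar);
	return ret;
}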
693