xref: /openbmc/linux/drivers/net/wireless/ath/ath6kl/hif.c (revision d60e8ab6)
1 /*
2  * Copyright (c) 2007-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 #include "hif.h"
17 
18 #include "core.h"
19 #include "target.h"
20 #include "hif-ops.h"
21 #include "debug.h"
22 
23 #define MAILBOX_FOR_BLOCK_SIZE          1
24 
25 #define ATH6KL_TIME_QUANTUM	10  /* in ms */
26 
27 static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
28 				      bool from_dma)
29 {
30 	u8 *buf;
31 	int i;
32 
33 	buf = req->virt_dma_buf;
34 
35 	for (i = 0; i < req->scat_entries; i++) {
36 
37 		if (from_dma)
38 			memcpy(req->scat_list[i].buf, buf,
39 			       req->scat_list[i].len);
40 		else
41 			memcpy(buf, req->scat_list[i].buf,
42 			       req->scat_list[i].len);
43 
44 		buf += req->scat_list[i].len;
45 	}
46 
47 	return 0;
48 }
49 
50 int ath6kl_hif_rw_comp_handler(void *context, int status)
51 {
52 	struct htc_packet *packet = context;
53 
54 	ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
55 		   packet, status);
56 
57 	packet->status = status;
58 	packet->completion(packet->context, packet);
59 
60 	return 0;
61 }
62 
63 static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
64 {
65 	u32 dummy;
66 	int status;
67 
68 	ath6kl_err("target debug interrupt\n");
69 
70 	ath6kl_target_failure(dev->ar);
71 
72 	/*
73 	 * read counter to clear the interrupt, the debug error interrupt is
74 	 * counter 0.
75 	 */
76 	status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
77 				     (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
78 	if (status)
79 		WARN_ON(1);
80 
81 	return status;
82 }
83 
/*
 * Poll for a pending mailbox recv message.
 *
 * Repeatedly loads the HTC interrupt-status register table (a synchronous
 * byte-incrementing read from HOST_INT_STATUS_ADDRESS) every
 * ATH6KL_TIME_QUANTUM ms, for up to @timeout ms, until the HTC mailbox
 * reports data together with a valid lookahead.
 *
 * On success the little-endian lookahead word for HTC_MAILBOX is converted
 * and stored in *@lk_ahd and 0 is returned.  Returns the negative error
 * from the register read on I/O failure, or -ETIME on timeout; on timeout
 * the debug-assertion counter is also checked so that a target assert gets
 * reported via ath6kl_hif_proc_dbg_intr().
 *
 * NOTE(review): uses mdelay(), so this busy-waits for up to @timeout ms.
 */
int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			      int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;	/* bit mask for the HTC mailbox */

	/* if timeout < ATH6KL_TIME_QUANTUM the loop body never runs and we
	 * fall straight through to the timeout path below (i == 0) */
	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				/* breaking here leaves i > 0, so the timeout
				 * branch below is skipped */
				break;
			}
		}

		/* delay a little  */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
	}

	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kl_hif_proc_dbg_intr(dev);
	}

	return status;
}
139 
140 /*
141  * Disable packet reception (used in case the host runs out of buffers)
142  * using the interrupt enable registers through the host I/F
143  */
144 int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
145 {
146 	struct ath6kl_irq_enable_reg regs;
147 	int status = 0;
148 
149 	ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
150 		   enable_rx ? "enable" : "disable");
151 
152 	/* take the lock to protect interrupt enable shadows */
153 	spin_lock_bh(&dev->lock);
154 
155 	if (enable_rx)
156 		dev->irq_en_reg.int_status_en |=
157 			SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
158 	else
159 		dev->irq_en_reg.int_status_en &=
160 		    ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
161 
162 	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
163 
164 	spin_unlock_bh(&dev->lock);
165 
166 	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
167 				     &regs.int_status_en,
168 				     sizeof(struct ath6kl_irq_enable_reg),
169 				     HIF_WR_SYNC_BYTE_INC);
170 
171 	return status;
172 }
173 
174 int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
175 			      struct hif_scatter_req *scat_req, bool read)
176 {
177 	int status = 0;
178 
179 	if (read) {
180 		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
181 		scat_req->addr = dev->ar->mbox_info.htc_addr;
182 	} else {
183 		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
184 
185 		scat_req->addr =
186 			(scat_req->len > HIF_MBOX_WIDTH) ?
187 			dev->ar->mbox_info.htc_ext_addr :
188 			dev->ar->mbox_info.htc_addr;
189 	}
190 
191 	ath6kl_dbg(ATH6KL_DBG_HIF,
192 		   "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
193 		   scat_req->scat_entries, scat_req->len,
194 		   scat_req->addr, !read ? "async" : "sync",
195 		   (read) ? "rd" : "wr");
196 
197 	if (!read && scat_req->virt_scat) {
198 		status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
199 		if (status) {
200 			scat_req->status = status;
201 			scat_req->complete(dev->ar->htc_target, scat_req);
202 			return 0;
203 		}
204 	}
205 
206 	status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);
207 
208 	if (read) {
209 		/* in sync mode, we can touch the scatter request */
210 		scat_req->status = status;
211 		if (!status && scat_req->virt_scat)
212 			scat_req->status =
213 				ath6kl_hif_cp_scat_dma_buf(scat_req, true);
214 	}
215 
216 	return status;
217 }
218 
219 static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
220 {
221 	u8 counter_int_status;
222 
223 	ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
224 
225 	counter_int_status = dev->irq_proc_reg.counter_int_status &
226 			     dev->irq_en_reg.cntr_int_status_en;
227 
228 	ath6kl_dbg(ATH6KL_DBG_IRQ,
229 		"valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
230 		counter_int_status);
231 
232 	/*
233 	 * NOTE: other modules like GMBOX may use the counter interrupt for
234 	 * credit flow control on other counters, we only need to check for
235 	 * the debug assertion counter interrupt.
236 	 */
237 	if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
238 		return ath6kl_hif_proc_dbg_intr(dev);
239 
240 	return 0;
241 }
242 
243 static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
244 {
245 	int status;
246 	u8 error_int_status;
247 	u8 reg_buf[4];
248 
249 	ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
250 
251 	error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
252 	if (!error_int_status) {
253 		WARN_ON(1);
254 		return -EIO;
255 	}
256 
257 	ath6kl_dbg(ATH6KL_DBG_IRQ,
258 		   "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
259 		   error_int_status);
260 
261 	if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
262 		ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
263 
264 	if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
265 		ath6kl_err("rx underflow\n");
266 
267 	if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
268 		ath6kl_err("tx overflow\n");
269 
270 	/* Clear the interrupt */
271 	dev->irq_proc_reg.error_int_status &= ~error_int_status;
272 
273 	/* set W1C value to clear the interrupt, this hits the register first */
274 	reg_buf[0] = error_int_status;
275 	reg_buf[1] = 0;
276 	reg_buf[2] = 0;
277 	reg_buf[3] = 0;
278 
279 	status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
280 				     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
281 
282 	if (status)
283 		WARN_ON(1);
284 
285 	return status;
286 }
287 
288 static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
289 {
290 	int status;
291 	u8 cpu_int_status;
292 	u8 reg_buf[4];
293 
294 	ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
295 
296 	cpu_int_status = dev->irq_proc_reg.cpu_int_status &
297 			 dev->irq_en_reg.cpu_int_status_en;
298 	if (!cpu_int_status) {
299 		WARN_ON(1);
300 		return -EIO;
301 	}
302 
303 	ath6kl_dbg(ATH6KL_DBG_IRQ,
304 		"valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
305 		cpu_int_status);
306 
307 	/* Clear the interrupt */
308 	dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
309 
310 	/*
311 	 * Set up the register transfer buffer to hit the register 4 times ,
312 	 * this is done to make the access 4-byte aligned to mitigate issues
313 	 * with host bus interconnects that restrict bus transfer lengths to
314 	 * be a multiple of 4-bytes.
315 	 */
316 
317 	/* set W1C value to clear the interrupt, this hits the register first */
318 	reg_buf[0] = cpu_int_status;
319 	/* the remaining are set to zero which have no-effect  */
320 	reg_buf[1] = 0;
321 	reg_buf[2] = 0;
322 	reg_buf[3] = 0;
323 
324 	status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
325 				     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
326 
327 	if (status)
328 		WARN_ON(1);
329 
330 	return status;
331 }
332 
333 /* process pending interrupts synchronously */
334 static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
335 {
336 	struct ath6kl_irq_proc_registers *rg;
337 	int status = 0;
338 	u8 host_int_status = 0;
339 	u32 lk_ahd = 0;
340 	u8 htc_mbox = 1 << HTC_MAILBOX;
341 
342 	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
343 
344 	/*
345 	 * NOTE: HIF implementation guarantees that the context of this
346 	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
347 	 * sleep or call any API that can block or switch thread/task
348 	 * contexts. This is a fully schedulable context.
349 	 */
350 
351 	/*
352 	 * Process pending intr only when int_status_en is clear, it may
353 	 * result in unnecessary bus transaction otherwise. Target may be
354 	 * unresponsive at the time.
355 	 */
356 	if (dev->irq_en_reg.int_status_en) {
357 		/*
358 		 * Read the first 28 bytes of the HTC register table. This
359 		 * will yield us the value of different int status
360 		 * registers and the lookahead registers.
361 		 *
362 		 *    length = sizeof(int_status) + sizeof(cpu_int_status)
363 		 *             + sizeof(error_int_status) +
364 		 *             sizeof(counter_int_status) +
365 		 *             sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
366 		 *             + sizeof(hole) + sizeof(rx_lkahd) +
367 		 *             sizeof(int_status_en) +
368 		 *             sizeof(cpu_int_status_en) +
369 		 *             sizeof(err_int_status_en) +
370 		 *             sizeof(cntr_int_status_en);
371 		 */
372 		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
373 					     (u8 *) &dev->irq_proc_reg,
374 					     sizeof(dev->irq_proc_reg),
375 					     HIF_RD_SYNC_BYTE_INC);
376 		if (status)
377 			goto out;
378 
379 		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
380 			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
381 					 &dev->irq_en_reg);
382 
383 		/* Update only those registers that are enabled */
384 		host_int_status = dev->irq_proc_reg.host_int_status &
385 				  dev->irq_en_reg.int_status_en;
386 
387 		/* Look at mbox status */
388 		if (host_int_status & htc_mbox) {
389 			/*
390 			 * Mask out pending mbox value, we use "lookAhead as
391 			 * the real flag for mbox processing.
392 			 */
393 			host_int_status &= ~htc_mbox;
394 			if (dev->irq_proc_reg.rx_lkahd_valid &
395 			    htc_mbox) {
396 				rg = &dev->irq_proc_reg;
397 				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
398 				if (!lk_ahd)
399 					ath6kl_err("lookAhead is zero!\n");
400 			}
401 		}
402 	}
403 
404 	if (!host_int_status && !lk_ahd) {
405 		*done = true;
406 		goto out;
407 	}
408 
409 	if (lk_ahd) {
410 		int fetched = 0;
411 
412 		ath6kl_dbg(ATH6KL_DBG_IRQ,
413 			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
414 		/*
415 		 * Mailbox Interrupt, the HTC layer may issue async
416 		 * requests to empty the mailbox. When emptying the recv
417 		 * mailbox we use the async handler above called from the
418 		 * completion routine of the callers read request. This can
419 		 * improve performance by reducing context switching when
420 		 * we rapidly pull packets.
421 		 */
422 		status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
423 							  lk_ahd, &fetched);
424 		if (status)
425 			goto out;
426 
427 		if (!fetched)
428 			/*
429 			 * HTC could not pull any messages out due to lack
430 			 * of resources.
431 			 */
432 			dev->htc_cnxt->chk_irq_status_cnt = 0;
433 	}
434 
435 	/* now handle the rest of them */
436 	ath6kl_dbg(ATH6KL_DBG_IRQ,
437 		   "valid interrupt source(s) for other interrupts: 0x%x\n",
438 		   host_int_status);
439 
440 	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
441 		/* CPU Interrupt */
442 		status = ath6kl_hif_proc_cpu_intr(dev);
443 		if (status)
444 			goto out;
445 	}
446 
447 	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
448 		/* Error Interrupt */
449 		status = ath6kl_hif_proc_err_intr(dev);
450 		if (status)
451 			goto out;
452 	}
453 
454 	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
455 		/* Counter Interrupt */
456 		status = ath6kl_hif_proc_counter_intr(dev);
457 
458 out:
459 	/*
460 	 * An optimization to bypass reading the IRQ status registers
461 	 * unecessarily which can re-wake the target, if upper layers
462 	 * determine that we are in a low-throughput mode, we can rely on
463 	 * taking another interrupt rather than re-checking the status
464 	 * registers which can re-wake the target.
465 	 *
466 	 * NOTE : for host interfaces that makes use of detecting pending
467 	 * mbox messages at hif can not use this optimization due to
468 	 * possible side effects, SPI requires the host to drain all
469 	 * messages from the mailbox before exiting the ISR routine.
470 	 */
471 
472 	ath6kl_dbg(ATH6KL_DBG_IRQ,
473 		   "bypassing irq status re-check, forcing done\n");
474 
475 	if (!dev->htc_cnxt->chk_irq_status_cnt)
476 		*done = true;
477 
478 	ath6kl_dbg(ATH6KL_DBG_IRQ,
479 		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);
480 
481 	return status;
482 }
483 
484 /* interrupt handler, kicks off all interrupt processing */
485 int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
486 {
487 	struct ath6kl_device *dev = ar->htc_target->dev;
488 	unsigned long timeout;
489 	int status = 0;
490 	bool done = false;
491 
492 	/*
493 	 * Reset counter used to flag a re-scan of IRQ status registers on
494 	 * the target.
495 	 */
496 	dev->htc_cnxt->chk_irq_status_cnt = 0;
497 
498 	/*
499 	 * IRQ processing is synchronous, interrupt status registers can be
500 	 * re-read.
501 	 */
502 	timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
503 	while (time_before(jiffies, timeout) && !done) {
504 		status = proc_pending_irqs(dev, &done);
505 		if (status)
506 			break;
507 	}
508 
509 	return status;
510 }
511 
512 static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
513 {
514 	struct ath6kl_irq_enable_reg regs;
515 	int status;
516 
517 	spin_lock_bh(&dev->lock);
518 
519 	/* Enable all but ATH6KL CPU interrupts */
520 	dev->irq_en_reg.int_status_en =
521 			SM(INT_STATUS_ENABLE_ERROR, 0x01) |
522 			SM(INT_STATUS_ENABLE_CPU, 0x01) |
523 			SM(INT_STATUS_ENABLE_COUNTER, 0x01);
524 
525 	/*
526 	 * NOTE: There are some cases where HIF can do detection of
527 	 * pending mbox messages which is disabled now.
528 	 */
529 	dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
530 
531 	/* Set up the CPU Interrupt status Register */
532 	dev->irq_en_reg.cpu_int_status_en = 0;
533 
534 	/* Set up the Error Interrupt status Register */
535 	dev->irq_en_reg.err_int_status_en =
536 		SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
537 		SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
538 
539 	/*
540 	 * Enable Counter interrupt status register to get fatal errors for
541 	 * debugging.
542 	 */
543 	dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
544 						ATH6KL_TARGET_DEBUG_INTR_MASK);
545 	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
546 
547 	spin_unlock_bh(&dev->lock);
548 
549 	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
550 				     &regs.int_status_en, sizeof(regs),
551 				     HIF_WR_SYNC_BYTE_INC);
552 
553 	if (status)
554 		ath6kl_err("failed to update interrupt ctl reg err: %d\n",
555 			   status);
556 
557 	return status;
558 }
559 
560 int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
561 {
562 	struct ath6kl_irq_enable_reg regs;
563 
564 	spin_lock_bh(&dev->lock);
565 	/* Disable all interrupts */
566 	dev->irq_en_reg.int_status_en = 0;
567 	dev->irq_en_reg.cpu_int_status_en = 0;
568 	dev->irq_en_reg.err_int_status_en = 0;
569 	dev->irq_en_reg.cntr_int_status_en = 0;
570 	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
571 	spin_unlock_bh(&dev->lock);
572 
573 	return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
574 				   &regs.int_status_en, sizeof(regs),
575 				   HIF_WR_SYNC_BYTE_INC);
576 }
577 
578 /* enable device interrupts */
579 int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
580 {
581 	int status = 0;
582 
583 	/*
584 	 * Make sure interrupt are disabled before unmasking at the HIF
585 	 * layer. The rationale here is that between device insertion
586 	 * (where we clear the interrupts the first time) and when HTC
587 	 * is finally ready to handle interrupts, other software can perform
588 	 * target "soft" resets. The ATH6KL interrupt enables reset back to an
589 	 * "enabled" state when this happens.
590 	 */
591 	ath6kl_hif_disable_intrs(dev);
592 
593 	/* unmask the host controller interrupts */
594 	ath6kl_hif_irq_enable(dev->ar);
595 	status = ath6kl_hif_enable_intrs(dev);
596 
597 	return status;
598 }
599 
600 /* disable all device interrupts */
601 int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
602 {
603 	/*
604 	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
605 	 * taken while we zero out our shadow registers in
606 	 * ath6kl_hif_disable_intrs().
607 	 */
608 	ath6kl_hif_irq_disable(dev->ar);
609 
610 	return ath6kl_hif_disable_intrs(dev);
611 }
612 
613 int ath6kl_hif_setup(struct ath6kl_device *dev)
614 {
615 	int status = 0;
616 
617 	spin_lock_init(&dev->lock);
618 
619 	/*
620 	 * NOTE: we actually get the block size of a mailbox other than 0,
621 	 * for SDIO the block size on mailbox 0 is artificially set to 1.
622 	 * So we use the block size that is set for the other 3 mailboxes.
623 	 */
624 	dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
625 
626 	/* must be a power of 2 */
627 	if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
628 		WARN_ON(1);
629 		status = -EINVAL;
630 		goto fail_setup;
631 	}
632 
633 	/* assemble mask, used for padding to a block */
634 	dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
635 
636 	ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
637 		   dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
638 
639 	status = ath6kl_hif_disable_intrs(dev);
640 
641 fail_setup:
642 	return status;
643 
644 }
645