1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2004-2011 Atheros Communications Inc.
4  * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
5  * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/mmc/card.h>
10 #include <linux/mmc/mmc.h>
11 #include <linux/mmc/host.h>
12 #include <linux/mmc/sdio_func.h>
13 #include <linux/mmc/sdio_ids.h>
14 #include <linux/mmc/sdio.h>
15 #include <linux/mmc/sd.h>
16 #include <linux/bitfield.h>
17 #include "core.h"
18 #include "bmi.h"
19 #include "debug.h"
20 #include "hif.h"
21 #include "htc.h"
22 #include "mac.h"
23 #include "targaddrs.h"
24 #include "trace.h"
25 #include "sdio.h"
26 #include "coredump.h"
27 
28 void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
29 
30 #define ATH10K_SDIO_VSG_BUF_SIZE	(64 * 1024)
31 
32 /* inlined helper functions */
33 
34 static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
35 						   size_t len)
36 {
37 	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
38 }
39 
40 static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
41 {
42 	return (enum ath10k_htc_ep_id)pipe_id;
43 }
44 
45 static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
46 {
47 	dev_kfree_skb(pkt->skb);
48 	pkt->skb = NULL;
49 	pkt->alloc_len = 0;
50 	pkt->act_len = 0;
51 	pkt->trailer_only = false;
52 }
53 
54 static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
55 						size_t act_len, size_t full_len,
56 						bool part_of_bundle,
57 						bool last_in_bundle)
58 {
59 	pkt->skb = dev_alloc_skb(full_len);
60 	if (!pkt->skb)
61 		return -ENOMEM;
62 
63 	pkt->act_len = act_len;
64 	pkt->alloc_len = full_len;
65 	pkt->part_of_bundle = part_of_bundle;
66 	pkt->last_in_bundle = last_in_bundle;
67 	pkt->trailer_only = false;
68 
69 	return 0;
70 }
71 
72 static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
73 {
74 	bool trailer_only = false;
75 	struct ath10k_htc_hdr *htc_hdr =
76 		(struct ath10k_htc_hdr *)pkt->skb->data;
77 	u16 len = __le16_to_cpu(htc_hdr->len);
78 
79 	if (len == htc_hdr->trailer_len)
80 		trailer_only = true;
81 
82 	return trailer_only;
83 }
84 
85 /* sdio/mmc functions */
86 
87 static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
88 					     unsigned int address,
89 					     unsigned char val)
90 {
91 	*arg = FIELD_PREP(BIT(31), write) |
92 	       FIELD_PREP(BIT(27), raw) |
93 	       FIELD_PREP(BIT(26), 1) |
94 	       FIELD_PREP(GENMASK(25, 9), address) |
95 	       FIELD_PREP(BIT(8), 1) |
96 	       FIELD_PREP(GENMASK(7, 0), val);
97 }
98 
99 static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
100 					   unsigned int address,
101 					   unsigned char byte)
102 {
103 	struct mmc_command io_cmd;
104 
105 	memset(&io_cmd, 0, sizeof(io_cmd));
106 	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
107 	io_cmd.opcode = SD_IO_RW_DIRECT;
108 	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
109 
110 	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
111 }
112 
113 static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
114 					   unsigned int address,
115 					   unsigned char *byte)
116 {
117 	struct mmc_command io_cmd;
118 	int ret;
119 
120 	memset(&io_cmd, 0, sizeof(io_cmd));
121 	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
122 	io_cmd.opcode = SD_IO_RW_DIRECT;
123 	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
124 
125 	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
126 	if (!ret)
127 		*byte = io_cmd.resp[0];
128 
129 	return ret;
130 }
131 
132 static int ath10k_sdio_config(struct ath10k *ar)
133 {
134 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
135 	struct sdio_func *func = ar_sdio->func;
136 	unsigned char byte, asyncintdelay = 2;
137 	int ret;
138 
139 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
140 
141 	sdio_claim_host(func);
142 
143 	byte = 0;
144 	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
145 					      SDIO_CCCR_DRIVE_STRENGTH,
146 					      &byte);
147 
148 	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
149 	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
150 			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
151 
152 	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
153 					      SDIO_CCCR_DRIVE_STRENGTH,
154 					      byte);
155 
156 	byte = 0;
157 	ret = ath10k_sdio_func0_cmd52_rd_byte(
158 		func->card,
159 		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
160 		&byte);
161 
162 	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
163 		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
164 		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
165 
166 	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
167 					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
168 					      byte);
169 	if (ret) {
170 		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
171 		goto out;
172 	}
173 
174 	byte = 0;
175 	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
176 					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
177 					      &byte);
178 
179 	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
180 
181 	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
182 					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
183 					      byte);
184 	if (ret) {
185 		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
186 			    ret);
187 		goto out;
188 	}
189 
190 	byte = 0;
191 	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
192 					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
193 					      &byte);
194 
195 	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
196 	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
197 
198 	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
199 					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
200 					      byte);
201 
202 	/* give us some time to enable, in ms */
203 	func->enable_timeout = 100;
204 
205 	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
206 	if (ret) {
207 		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
208 			    ar_sdio->mbox_info.block_size, ret);
209 		goto out;
210 	}
211 
212 out:
213 	sdio_release_host(func);
214 	return ret;
215 }
216 
217 static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
218 {
219 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
220 	struct sdio_func *func = ar_sdio->func;
221 	int ret;
222 
223 	sdio_claim_host(func);
224 
225 	sdio_writel(func, val, addr, &ret);
226 	if (ret) {
227 		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
228 			    val, addr, ret);
229 		goto out;
230 	}
231 
232 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
233 		   addr, val);
234 
235 out:
236 	sdio_release_host(func);
237 
238 	return ret;
239 }
240 
241 static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
242 {
243 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
244 	struct sdio_func *func = ar_sdio->func;
245 	__le32 *buf;
246 	int ret;
247 
248 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
249 	if (!buf)
250 		return -ENOMEM;
251 
252 	*buf = cpu_to_le32(val);
253 
254 	sdio_claim_host(func);
255 
256 	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
257 	if (ret) {
258 		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
259 			    val, addr, ret);
260 		goto out;
261 	}
262 
263 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
264 		   addr, val);
265 
266 out:
267 	sdio_release_host(func);
268 
269 	kfree(buf);
270 
271 	return ret;
272 }
273 
274 static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
275 {
276 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
277 	struct sdio_func *func = ar_sdio->func;
278 	int ret;
279 
280 	sdio_claim_host(func);
281 	*val = sdio_readl(func, addr, &ret);
282 	if (ret) {
283 		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
284 			    addr, ret);
285 		goto out;
286 	}
287 
288 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
289 		   addr, *val);
290 
291 out:
292 	sdio_release_host(func);
293 
294 	return ret;
295 }
296 
297 static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
298 {
299 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
300 	struct sdio_func *func = ar_sdio->func;
301 	int ret;
302 
303 	sdio_claim_host(func);
304 
305 	ret = sdio_memcpy_fromio(func, buf, addr, len);
306 	if (ret) {
307 		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
308 			    addr, ret);
309 		goto out;
310 	}
311 
312 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
313 		   addr, buf, len);
314 	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
315 
316 out:
317 	sdio_release_host(func);
318 
319 	return ret;
320 }
321 
322 static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
323 {
324 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
325 	struct sdio_func *func = ar_sdio->func;
326 	int ret;
327 
328 	sdio_claim_host(func);
329 
330 	/* For some reason toio() doesn't have const for the buffer, need
331 	 * an ugly hack to workaround that.
332 	 */
333 	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
334 	if (ret) {
335 		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
336 			    addr, ret);
337 		goto out;
338 	}
339 
340 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
341 		   addr, buf, len);
342 	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
343 
344 out:
345 	sdio_release_host(func);
346 
347 	return ret;
348 }
349 
350 static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
351 {
352 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
353 	struct sdio_func *func = ar_sdio->func;
354 	int ret;
355 
356 	sdio_claim_host(func);
357 
358 	len = round_down(len, ar_sdio->mbox_info.block_size);
359 
360 	ret = sdio_readsb(func, buf, addr, len);
361 	if (ret) {
362 		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
363 			    addr, ret);
364 		goto out;
365 	}
366 
367 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
368 		   addr, buf, len);
369 	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
370 
371 out:
372 	sdio_release_host(func);
373 
374 	return ret;
375 }
376 
377 /* HIF mbox functions */
378 
379 static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
380 					      struct ath10k_sdio_rx_data *pkt,
381 					      u32 *lookaheads,
382 					      int *n_lookaheads)
383 {
384 	struct ath10k_htc *htc = &ar->htc;
385 	struct sk_buff *skb = pkt->skb;
386 	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
387 	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
388 	enum ath10k_htc_ep_id eid;
389 	u8 *trailer;
390 	int ret;
391 
392 	if (trailer_present) {
393 		trailer = skb->data + skb->len - htc_hdr->trailer_len;
394 
395 		eid = pipe_id_to_eid(htc_hdr->eid);
396 
397 		ret = ath10k_htc_process_trailer(htc,
398 						 trailer,
399 						 htc_hdr->trailer_len,
400 						 eid,
401 						 lookaheads,
402 						 n_lookaheads);
403 		if (ret)
404 			return ret;
405 
406 		if (is_trailer_only_msg(pkt))
407 			pkt->trailer_only = true;
408 
409 		skb_trim(skb, skb->len - htc_hdr->trailer_len);
410 	}
411 
412 	skb_pull(skb, sizeof(*htc_hdr));
413 
414 	return 0;
415 }
416 
417 static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
418 					       u32 lookaheads[],
419 					       int *n_lookahead)
420 {
421 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
422 	struct ath10k_htc *htc = &ar->htc;
423 	struct ath10k_sdio_rx_data *pkt;
424 	struct ath10k_htc_ep *ep;
425 	struct ath10k_skb_rxcb *cb;
426 	enum ath10k_htc_ep_id id;
427 	int ret, i, *n_lookahead_local;
428 	u32 *lookaheads_local;
429 	int lookahead_idx = 0;
430 
431 	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
432 		lookaheads_local = lookaheads;
433 		n_lookahead_local = n_lookahead;
434 
435 		id = ((struct ath10k_htc_hdr *)
436 		      &lookaheads[lookahead_idx++])->eid;
437 
438 		if (id >= ATH10K_HTC_EP_COUNT) {
439 			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
440 				    id);
441 			ret = -ENOMEM;
442 			goto out;
443 		}
444 
445 		ep = &htc->endpoint[id];
446 
447 		if (ep->service_id == 0) {
448 			ath10k_warn(ar, "ep %d is not connected\n", id);
449 			ret = -ENOMEM;
450 			goto out;
451 		}
452 
453 		pkt = &ar_sdio->rx_pkts[i];
454 
455 		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
456 			/* Only read lookahead's from RX trailers
457 			 * for the last packet in a bundle.
458 			 */
459 			lookahead_idx--;
460 			lookaheads_local = NULL;
461 			n_lookahead_local = NULL;
462 		}
463 
464 		ret = ath10k_sdio_mbox_rx_process_packet(ar,
465 							 pkt,
466 							 lookaheads_local,
467 							 n_lookahead_local);
468 		if (ret)
469 			goto out;
470 
471 		if (!pkt->trailer_only) {
472 			cb = ATH10K_SKB_RXCB(pkt->skb);
473 			cb->eid = id;
474 
475 			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
476 			queue_work(ar->workqueue_aux,
477 				   &ar_sdio->async_work_rx);
478 		} else {
479 			kfree_skb(pkt->skb);
480 		}
481 
482 		/* The RX complete handler now owns the skb...*/
483 		pkt->skb = NULL;
484 		pkt->alloc_len = 0;
485 	}
486 
487 	ret = 0;
488 
489 out:
490 	/* Free all packets that was not passed on to the RX completion
491 	 * handler...
492 	 */
493 	for (; i < ar_sdio->n_rx_pkts; i++)
494 		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
495 
496 	return ret;
497 }
498 
499 static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
500 					 struct ath10k_sdio_rx_data *rx_pkts,
501 					 struct ath10k_htc_hdr *htc_hdr,
502 					 size_t full_len, size_t act_len,
503 					 size_t *bndl_cnt)
504 {
505 	int ret, i;
506 	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
507 
508 	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
509 
510 	if (*bndl_cnt > max_msgs) {
511 		ath10k_warn(ar,
512 			    "HTC bundle length %u exceeds maximum %u\n",
513 			    le16_to_cpu(htc_hdr->len),
514 			    max_msgs);
515 		return -ENOMEM;
516 	}
517 
518 	/* Allocate bndl_cnt extra skb's for the bundle.
519 	 * The package containing the
520 	 * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
521 	 * in bndl_cnt. The skb for that packet will be
522 	 * allocated separately.
523 	 */
524 	for (i = 0; i < *bndl_cnt; i++) {
525 		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
526 						    act_len,
527 						    full_len,
528 						    true,
529 						    false);
530 		if (ret)
531 			return ret;
532 	}
533 
534 	return 0;
535 }
536 
537 static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
538 				     u32 lookaheads[], int n_lookaheads)
539 {
540 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
541 	struct ath10k_htc_hdr *htc_hdr;
542 	size_t full_len, act_len;
543 	bool last_in_bundle;
544 	int ret, i;
545 	int pkt_cnt = 0;
546 
547 	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
548 		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
549 			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
550 		ret = -ENOMEM;
551 		goto err;
552 	}
553 
554 	for (i = 0; i < n_lookaheads; i++) {
555 		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
556 		last_in_bundle = false;
557 
558 		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
559 			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
560 				    le16_to_cpu(htc_hdr->len),
561 				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
562 			ret = -ENOMEM;
563 
564 			ath10k_core_start_recovery(ar);
565 			ath10k_warn(ar, "exceeds length, start recovery\n");
566 
567 			goto err;
568 		}
569 
570 		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
571 		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
572 
573 		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
574 			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
575 				    htc_hdr->eid, htc_hdr->flags,
576 				    le16_to_cpu(htc_hdr->len));
577 			ret = -EINVAL;
578 			goto err;
579 		}
580 
581 		if (ath10k_htc_get_bundle_count(
582 			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
583 			/* HTC header indicates that every packet to follow
584 			 * has the same padded length so that it can be
585 			 * optimally fetched as a full bundle.
586 			 */
587 			size_t bndl_cnt;
588 
589 			ret = ath10k_sdio_mbox_alloc_bundle(ar,
590 							    &ar_sdio->rx_pkts[pkt_cnt],
591 							    htc_hdr,
592 							    full_len,
593 							    act_len,
594 							    &bndl_cnt);
595 
596 			if (ret) {
597 				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
598 					    ret);
599 				goto err;
600 			}
601 
602 			pkt_cnt += bndl_cnt;
603 
604 			/* next buffer will be the last in the bundle */
605 			last_in_bundle = true;
606 		}
607 
608 		/* Allocate skb for packet. If the packet had the
609 		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
610 		 * packet skb's have been allocated in the previous step.
611 		 */
612 		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
613 			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
614 
615 		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
616 						    act_len,
617 						    full_len,
618 						    last_in_bundle,
619 						    last_in_bundle);
620 		if (ret) {
621 			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
622 			goto err;
623 		}
624 
625 		pkt_cnt++;
626 	}
627 
628 	ar_sdio->n_rx_pkts = pkt_cnt;
629 
630 	return 0;
631 
632 err:
633 	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
634 		if (!ar_sdio->rx_pkts[i].alloc_len)
635 			break;
636 		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
637 	}
638 
639 	return ret;
640 }
641 
642 static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
643 {
644 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
645 	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
646 	struct sk_buff *skb = pkt->skb;
647 	struct ath10k_htc_hdr *htc_hdr;
648 	int ret;
649 
650 	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
651 				 skb->data, pkt->alloc_len);
652 	if (ret)
653 		goto err;
654 
655 	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
656 	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
657 
658 	if (pkt->act_len > pkt->alloc_len) {
659 		ret = -EINVAL;
660 		goto err;
661 	}
662 
663 	skb_put(skb, pkt->act_len);
664 	return 0;
665 
666 err:
667 	ar_sdio->n_rx_pkts = 0;
668 	ath10k_sdio_mbox_free_rx_pkt(pkt);
669 
670 	return ret;
671 }
672 
673 static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
674 {
675 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
676 	struct ath10k_sdio_rx_data *pkt;
677 	struct ath10k_htc_hdr *htc_hdr;
678 	int ret, i;
679 	u32 pkt_offset, virt_pkt_len;
680 
681 	virt_pkt_len = 0;
682 	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
683 		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
684 
685 	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
686 		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
687 		ret = -E2BIG;
688 		goto err;
689 	}
690 
691 	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
692 				 ar_sdio->vsg_buffer, virt_pkt_len);
693 	if (ret) {
694 		ath10k_warn(ar, "failed to read bundle packets: %d", ret);
695 		goto err;
696 	}
697 
698 	pkt_offset = 0;
699 	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
700 		pkt = &ar_sdio->rx_pkts[i];
701 		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
702 		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
703 
704 		if (pkt->act_len > pkt->alloc_len) {
705 			ret = -EINVAL;
706 			goto err;
707 		}
708 
709 		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
710 		pkt_offset += pkt->alloc_len;
711 	}
712 
713 	return 0;
714 
715 err:
716 	/* Free all packets that was not successfully fetched. */
717 	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
718 		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
719 
720 	ar_sdio->n_rx_pkts = 0;
721 
722 	return ret;
723 }
724 
725 /* This is the timeout for mailbox processing done in the sdio irq
726  * handler. The timeout is deliberately set quite high since SDIO dump logs
727  * over serial port can/will add a substantial overhead to the processing
728  * (if enabled).
729  */
730 #define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
731 
732 static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
733 						  u32 msg_lookahead, bool *done)
734 {
735 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
736 	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
737 	int n_lookaheads = 1;
738 	unsigned long timeout;
739 	int ret;
740 
741 	*done = true;
742 
743 	/* Copy the lookahead obtained from the HTC register table into our
744 	 * temp array as a start value.
745 	 */
746 	lookaheads[0] = msg_lookahead;
747 
748 	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
749 	do {
750 		/* Try to allocate as many HTC RX packets indicated by
751 		 * n_lookaheads.
752 		 */
753 		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
754 						n_lookaheads);
755 		if (ret)
756 			break;
757 
758 		if (ar_sdio->n_rx_pkts >= 2)
759 			/* A recv bundle was detected, force IRQ status
760 			 * re-check again.
761 			 */
762 			*done = false;
763 
764 		if (ar_sdio->n_rx_pkts > 1)
765 			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
766 		else
767 			ret = ath10k_sdio_mbox_rx_fetch(ar);
768 
769 		/* Process fetched packets. This will potentially update
770 		 * n_lookaheads depending on if the packets contain lookahead
771 		 * reports.
772 		 */
773 		n_lookaheads = 0;
774 		ret = ath10k_sdio_mbox_rx_process_packets(ar,
775 							  lookaheads,
776 							  &n_lookaheads);
777 
778 		if (!n_lookaheads || ret)
779 			break;
780 
781 		/* For SYNCH processing, if we get here, we are running
782 		 * through the loop again due to updated lookaheads. Set
783 		 * flag that we should re-check IRQ status registers again
784 		 * before leaving IRQ processing, this can net better
785 		 * performance in high throughput situations.
786 		 */
787 		*done = false;
788 	} while (time_before(jiffies, timeout));
789 
790 	if (ret && (ret != -ECANCELED))
791 		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
792 			    ret);
793 
794 	return ret;
795 }
796 
797 static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
798 {
799 	u32 val;
800 	int ret;
801 
802 	/* TODO: Add firmware crash handling */
803 	ath10k_warn(ar, "firmware crashed\n");
804 
805 	/* read counter to clear the interrupt, the debug error interrupt is
806 	 * counter 0.
807 	 */
808 	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
809 	if (ret)
810 		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
811 
812 	return ret;
813 }
814 
815 static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
816 {
817 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
818 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
819 	u8 counter_int_status;
820 	int ret;
821 
822 	mutex_lock(&irq_data->mtx);
823 	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
824 			     irq_data->irq_en_reg->cntr_int_status_en;
825 
826 	/* NOTE: other modules like GMBOX may use the counter interrupt for
827 	 * credit flow control on other counters, we only need to check for
828 	 * the debug assertion counter interrupt.
829 	 */
830 	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
831 		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
832 	else
833 		ret = 0;
834 
835 	mutex_unlock(&irq_data->mtx);
836 
837 	return ret;
838 }
839 
840 static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
841 {
842 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
843 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
844 	u8 error_int_status;
845 	int ret;
846 
847 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
848 
849 	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
850 	if (!error_int_status) {
851 		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
852 			    error_int_status);
853 		return -EIO;
854 	}
855 
856 	ath10k_dbg(ar, ATH10K_DBG_SDIO,
857 		   "sdio error_int_status 0x%x\n", error_int_status);
858 
859 	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
860 		      error_int_status))
861 		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
862 
863 	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
864 		      error_int_status))
865 		ath10k_warn(ar, "rx underflow interrupt error\n");
866 
867 	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
868 		      error_int_status))
869 		ath10k_warn(ar, "tx overflow interrupt error\n");
870 
871 	/* Clear the interrupt */
872 	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
873 
874 	/* set W1C value to clear the interrupt, this hits the register first */
875 	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
876 				    error_int_status);
877 	if (ret) {
878 		ath10k_warn(ar, "unable to write to error int status address: %d\n",
879 			    ret);
880 		return ret;
881 	}
882 
883 	return 0;
884 }
885 
886 static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
887 {
888 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
889 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
890 	u8 cpu_int_status;
891 	int ret;
892 
893 	mutex_lock(&irq_data->mtx);
894 	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
895 			 irq_data->irq_en_reg->cpu_int_status_en;
896 	if (!cpu_int_status) {
897 		ath10k_warn(ar, "CPU interrupt status is zero\n");
898 		ret = -EIO;
899 		goto out;
900 	}
901 
902 	/* Clear the interrupt */
903 	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
904 
905 	/* Set up the register transfer buffer to hit the register 4 times,
906 	 * this is done to make the access 4-byte aligned to mitigate issues
907 	 * with host bus interconnects that restrict bus transfer lengths to
908 	 * be a multiple of 4-bytes.
909 	 *
910 	 * Set W1C value to clear the interrupt, this hits the register first.
911 	 */
912 	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
913 				    cpu_int_status);
914 	if (ret) {
915 		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
916 			    ret);
917 		goto out;
918 	}
919 
920 out:
921 	mutex_unlock(&irq_data->mtx);
922 	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
923 		ath10k_sdio_fw_crashed_dump(ar);
924 
925 	return ret;
926 }
927 
928 static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
929 					    u8 *host_int_status,
930 					    u32 *lookahead)
931 {
932 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
933 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
934 	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
935 	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
936 	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
937 	int ret;
938 
939 	mutex_lock(&irq_data->mtx);
940 
941 	*lookahead = 0;
942 	*host_int_status = 0;
943 
944 	/* int_status_en is supposed to be non zero, otherwise interrupts
945 	 * shouldn't be enabled. There is however a short time frame during
946 	 * initialization between the irq register and int_status_en init
947 	 * where this can happen.
948 	 * We silently ignore this condition.
949 	 */
950 	if (!irq_en_reg->int_status_en) {
951 		ret = 0;
952 		goto out;
953 	}
954 
955 	/* Read the first sizeof(struct ath10k_irq_proc_registers)
956 	 * bytes of the HTC register table. This
957 	 * will yield us the value of different int status
958 	 * registers and the lookahead registers.
959 	 */
960 	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
961 			       irq_proc_reg, sizeof(*irq_proc_reg));
962 	if (ret) {
963 		ath10k_core_start_recovery(ar);
964 		ath10k_warn(ar, "read int status fail, start recovery\n");
965 		goto out;
966 	}
967 
968 	/* Update only those registers that are enabled */
969 	*host_int_status = irq_proc_reg->host_int_status &
970 			   irq_en_reg->int_status_en;
971 
972 	/* Look at mbox status */
973 	if (!(*host_int_status & htc_mbox)) {
974 		*lookahead = 0;
975 		ret = 0;
976 		goto out;
977 	}
978 
979 	/* Mask out pending mbox value, we use look ahead as
980 	 * the real flag for mbox processing.
981 	 */
982 	*host_int_status &= ~htc_mbox;
983 	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
984 		*lookahead = le32_to_cpu(
985 			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
986 		if (!*lookahead)
987 			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
988 	}
989 
990 out:
991 	mutex_unlock(&irq_data->mtx);
992 	return ret;
993 }
994 
995 static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
996 					      bool *done)
997 {
998 	u8 host_int_status;
999 	u32 lookahead;
1000 	int ret;
1001 
1002 	/* NOTE: HIF implementation guarantees that the context of this
1003 	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
1004 	 * sleep or call any API that can block or switch thread/task
1005 	 * contexts. This is a fully schedulable context.
1006 	 */
1007 
1008 	ret = ath10k_sdio_mbox_read_int_status(ar,
1009 					       &host_int_status,
1010 					       &lookahead);
1011 	if (ret) {
1012 		*done = true;
1013 		goto out;
1014 	}
1015 
1016 	if (!host_int_status && !lookahead) {
1017 		ret = 0;
1018 		*done = true;
1019 		goto out;
1020 	}
1021 
1022 	if (lookahead) {
1023 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1024 			   "sdio pending mailbox msg lookahead 0x%08x\n",
1025 			   lookahead);
1026 
1027 		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
1028 							     lookahead,
1029 							     done);
1030 		if (ret)
1031 			goto out;
1032 	}
1033 
1034 	/* now, handle the rest of the interrupts */
1035 	ath10k_dbg(ar, ATH10K_DBG_SDIO,
1036 		   "sdio host_int_status 0x%x\n", host_int_status);
1037 
1038 	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
1039 		/* CPU Interrupt */
1040 		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
1041 		if (ret)
1042 			goto out;
1043 	}
1044 
1045 	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
1046 		/* Error Interrupt */
1047 		ret = ath10k_sdio_mbox_proc_err_intr(ar);
1048 		if (ret)
1049 			goto out;
1050 	}
1051 
1052 	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
1053 		/* Counter Interrupt */
1054 		ret = ath10k_sdio_mbox_proc_counter_intr(ar);
1055 
1056 	ret = 0;
1057 
1058 out:
1059 	/* An optimization to bypass reading the IRQ status registers
1060 	 * unecessarily which can re-wake the target, if upper layers
1061 	 * determine that we are in a low-throughput mode, we can rely on
1062 	 * taking another interrupt rather than re-checking the status
1063 	 * registers which can re-wake the target.
1064 	 *
1065 	 * NOTE : for host interfaces that makes use of detecting pending
1066 	 * mbox messages at hif can not use this optimization due to
1067 	 * possible side effects, SPI requires the host to drain all
1068 	 * messages from the mailbox before exiting the ISR routine.
1069 	 */
1070 
1071 	ath10k_dbg(ar, ATH10K_DBG_SDIO,
1072 		   "sdio pending irqs done %d status %d",
1073 		   *done, ret);
1074 
1075 	return ret;
1076 }
1077 
1078 static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
1079 {
1080 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1081 	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1082 	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
1083 
1084 	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
1085 	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
1086 	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
1087 	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
1088 	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
1089 
1090 	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
1091 
1092 	dev_id_base = (device & 0x0F00);
1093 	dev_id_chiprev = (device & 0x00FF);
1094 	switch (dev_id_base) {
1095 	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
1096 		if (dev_id_chiprev < 4)
1097 			mbox_info->ext_info[0].htc_ext_sz =
1098 				ATH10K_HIF_MBOX0_EXT_WIDTH;
1099 		else
1100 			/* from QCA6174 2.0(0x504), the width has been extended
1101 			 * to 56K
1102 			 */
1103 			mbox_info->ext_info[0].htc_ext_sz =
1104 				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1105 		break;
1106 	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
1107 		mbox_info->ext_info[0].htc_ext_sz =
1108 			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1109 		break;
1110 	default:
1111 		mbox_info->ext_info[0].htc_ext_sz =
1112 				ATH10K_HIF_MBOX0_EXT_WIDTH;
1113 	}
1114 
1115 	mbox_info->ext_info[1].htc_ext_addr =
1116 		mbox_info->ext_info[0].htc_ext_addr +
1117 		mbox_info->ext_info[0].htc_ext_sz +
1118 		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
1119 	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
1120 }
1121 
1122 /* BMI functions */
1123 
1124 static int ath10k_sdio_bmi_credits(struct ath10k *ar)
1125 {
1126 	u32 addr, cmd_credits;
1127 	unsigned long timeout;
1128 	int ret;
1129 
1130 	/* Read the counter register to get the command credits */
1131 	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
1132 	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1133 	cmd_credits = 0;
1134 
1135 	while (time_before(jiffies, timeout) && !cmd_credits) {
1136 		/* Hit the credit counter with a 4-byte access, the first byte
1137 		 * read will hit the counter and cause a decrement, while the
1138 		 * remaining 3 bytes has no effect. The rationale behind this
1139 		 * is to make all HIF accesses 4-byte aligned.
1140 		 */
1141 		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
1142 		if (ret) {
1143 			ath10k_warn(ar,
1144 				    "unable to decrement the command credit count register: %d\n",
1145 				    ret);
1146 			return ret;
1147 		}
1148 
1149 		/* The counter is only 8 bits.
1150 		 * Ignore anything in the upper 3 bytes
1151 		 */
1152 		cmd_credits &= 0xFF;
1153 	}
1154 
1155 	if (!cmd_credits) {
1156 		ath10k_warn(ar, "bmi communication timeout\n");
1157 		return -ETIMEDOUT;
1158 	}
1159 
1160 	return 0;
1161 }
1162 
1163 static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
1164 {
1165 	unsigned long timeout;
1166 	u32 rx_word;
1167 	int ret;
1168 
1169 	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1170 	rx_word = 0;
1171 
1172 	while ((time_before(jiffies, timeout)) && !rx_word) {
1173 		ret = ath10k_sdio_read32(ar,
1174 					 MBOX_HOST_INT_STATUS_ADDRESS,
1175 					 &rx_word);
1176 		if (ret) {
1177 			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
1178 			return ret;
1179 		}
1180 
1181 		 /* all we really want is one bit */
1182 		rx_word &= 1;
1183 	}
1184 
1185 	if (!rx_word) {
1186 		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
1187 		return -EINVAL;
1188 	}
1189 
1190 	return ret;
1191 }
1192 
1193 static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
1194 					void *req, u32 req_len,
1195 					void *resp, u32 *resp_len)
1196 {
1197 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1198 	u32 addr;
1199 	int ret;
1200 
1201 	if (req) {
1202 		ret = ath10k_sdio_bmi_credits(ar);
1203 		if (ret)
1204 			return ret;
1205 
1206 		addr = ar_sdio->mbox_info.htc_addr;
1207 
1208 		memcpy(ar_sdio->bmi_buf, req, req_len);
1209 		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
1210 		if (ret) {
1211 			ath10k_warn(ar,
1212 				    "unable to send the bmi data to the device: %d\n",
1213 				    ret);
1214 			return ret;
1215 		}
1216 	}
1217 
1218 	if (!resp || !resp_len)
1219 		/* No response expected */
1220 		return 0;
1221 
1222 	/* During normal bootup, small reads may be required.
1223 	 * Rather than issue an HIF Read and then wait as the Target
1224 	 * adds successive bytes to the FIFO, we wait here until
1225 	 * we know that response data is available.
1226 	 *
1227 	 * This allows us to cleanly timeout on an unexpected
1228 	 * Target failure rather than risk problems at the HIF level.
1229 	 * In particular, this avoids SDIO timeouts and possibly garbage
1230 	 * data on some host controllers.  And on an interconnect
1231 	 * such as Compact Flash (as well as some SDIO masters) which
1232 	 * does not provide any indication on data timeout, it avoids
1233 	 * a potential hang or garbage response.
1234 	 *
1235 	 * Synchronization is more difficult for reads larger than the
1236 	 * size of the MBOX FIFO (128B), because the Target is unable
1237 	 * to push the 129th byte of data until AFTER the Host posts an
1238 	 * HIF Read and removes some FIFO data.  So for large reads the
1239 	 * Host proceeds to post an HIF Read BEFORE all the data is
1240 	 * actually available to read.  Fortunately, large BMI reads do
1241 	 * not occur in practice -- they're supported for debug/development.
1242 	 *
1243 	 * So Host/Target BMI synchronization is divided into these cases:
1244 	 *  CASE 1: length < 4
1245 	 *        Should not happen
1246 	 *
1247 	 *  CASE 2: 4 <= length <= 128
1248 	 *        Wait for first 4 bytes to be in FIFO
1249 	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
1250 	 *        a BMI command credit, which indicates that the ENTIRE
1251 	 *        response is available in the FIFO
1252 	 *
1253 	 *  CASE 3: length > 128
1254 	 *        Wait for the first 4 bytes to be in FIFO
1255 	 *
1256 	 * For most uses, a small timeout should be sufficient and we will
1257 	 * usually see a response quickly; but there may be some unusual
1258 	 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
1259 	 * For now, we use an unbounded busy loop while waiting for
1260 	 * BMI_EXECUTE.
1261 	 *
1262 	 * If BMI_EXECUTE ever needs to support longer-latency execution,
1263 	 * especially in production, this code needs to be enhanced to sleep
1264 	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
1265 	 * a function of Host processor speed.
1266 	 */
1267 	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
1268 	if (ret)
1269 		return ret;
1270 
1271 	/* We always read from the start of the mbox address */
1272 	addr = ar_sdio->mbox_info.htc_addr;
1273 	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
1274 	if (ret) {
1275 		ath10k_warn(ar,
1276 			    "unable to read the bmi data from the device: %d\n",
1277 			    ret);
1278 		return ret;
1279 	}
1280 
1281 	memcpy(resp, ar_sdio->bmi_buf, *resp_len);
1282 
1283 	return 0;
1284 }
1285 
1286 /* sdio async handling functions */
1287 
1288 static struct ath10k_sdio_bus_request
1289 *ath10k_sdio_alloc_busreq(struct ath10k *ar)
1290 {
1291 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1292 	struct ath10k_sdio_bus_request *bus_req;
1293 
1294 	spin_lock_bh(&ar_sdio->lock);
1295 
1296 	if (list_empty(&ar_sdio->bus_req_freeq)) {
1297 		bus_req = NULL;
1298 		goto out;
1299 	}
1300 
1301 	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
1302 				   struct ath10k_sdio_bus_request, list);
1303 	list_del(&bus_req->list);
1304 
1305 out:
1306 	spin_unlock_bh(&ar_sdio->lock);
1307 	return bus_req;
1308 }
1309 
1310 static void ath10k_sdio_free_bus_req(struct ath10k *ar,
1311 				     struct ath10k_sdio_bus_request *bus_req)
1312 {
1313 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1314 
1315 	memset(bus_req, 0, sizeof(*bus_req));
1316 
1317 	spin_lock_bh(&ar_sdio->lock);
1318 	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
1319 	spin_unlock_bh(&ar_sdio->lock);
1320 }
1321 
1322 static void __ath10k_sdio_write_async(struct ath10k *ar,
1323 				      struct ath10k_sdio_bus_request *req)
1324 {
1325 	struct ath10k_htc_ep *ep;
1326 	struct sk_buff *skb;
1327 	int ret;
1328 
1329 	skb = req->skb;
1330 	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
1331 	if (ret)
1332 		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
1333 			    req->address, ret);
1334 
1335 	if (req->htc_msg) {
1336 		ep = &ar->htc.endpoint[req->eid];
1337 		ath10k_htc_notify_tx_completion(ep, skb);
1338 	} else if (req->comp) {
1339 		complete(req->comp);
1340 	}
1341 
1342 	ath10k_sdio_free_bus_req(ar, req);
1343 }
1344 
1345 /* To improve throughput use workqueue to deliver packets to HTC layer,
1346  * this way SDIO bus is utilised much better.
1347  */
1348 static void ath10k_rx_indication_async_work(struct work_struct *work)
1349 {
1350 	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1351 						   async_work_rx);
1352 	struct ath10k *ar = ar_sdio->ar;
1353 	struct ath10k_htc_ep *ep;
1354 	struct ath10k_skb_rxcb *cb;
1355 	struct sk_buff *skb;
1356 
1357 	while (true) {
1358 		skb = skb_dequeue(&ar_sdio->rx_head);
1359 		if (!skb)
1360 			break;
1361 		cb = ATH10K_SKB_RXCB(skb);
1362 		ep = &ar->htc.endpoint[cb->eid];
1363 		ep->ep_ops.ep_rx_complete(ar, skb);
1364 	}
1365 
1366 	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
1367 		napi_schedule(&ar->napi);
1368 }
1369 
1370 static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
1371 {
1372 	struct ath10k *ar = ar_sdio->ar;
1373 	unsigned char rtc_state = 0;
1374 	int ret = 0;
1375 
1376 	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
1377 	if (ret) {
1378 		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
1379 		return ret;
1380 	}
1381 
1382 	*state = rtc_state & 0x3;
1383 
1384 	return ret;
1385 }
1386 
1387 static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
1388 {
1389 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1390 	u32 val;
1391 	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
1392 	unsigned char rtc_state = 0;
1393 
1394 	sdio_claim_host(ar_sdio->func);
1395 
1396 	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
1397 	if (ret) {
1398 		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
1399 			    ret);
1400 		goto release;
1401 	}
1402 
1403 	if (enable_sleep) {
1404 		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
1405 		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
1406 	} else {
1407 		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
1408 		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
1409 	}
1410 
1411 	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
1412 	if (ret) {
1413 		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
1414 			    ret);
1415 	}
1416 
1417 	if (!enable_sleep) {
1418 		do {
1419 			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
1420 			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
1421 
1422 			if (ret) {
1423 				ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
1424 				break;
1425 			}
1426 
1427 			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
1428 				   rtc_state);
1429 
1430 			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
1431 				break;
1432 
1433 			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
1434 			retry--;
1435 		} while (retry > 0);
1436 	}
1437 
1438 release:
1439 	sdio_release_host(ar_sdio->func);
1440 
1441 	return ret;
1442 }
1443 
1444 static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
1445 {
1446 	struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
1447 
1448 	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
1449 	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1450 }
1451 
1452 static void ath10k_sdio_write_async_work(struct work_struct *work)
1453 {
1454 	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1455 						   wr_async_work);
1456 	struct ath10k *ar = ar_sdio->ar;
1457 	struct ath10k_sdio_bus_request *req, *tmp_req;
1458 	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1459 
1460 	spin_lock_bh(&ar_sdio->wr_async_lock);
1461 
1462 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1463 		list_del(&req->list);
1464 		spin_unlock_bh(&ar_sdio->wr_async_lock);
1465 
1466 		if (req->address >= mbox_info->htc_addr &&
1467 		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
1468 			ath10k_sdio_set_mbox_sleep(ar, false);
1469 			mod_timer(&ar_sdio->sleep_timer, jiffies +
1470 				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
1471 		}
1472 
1473 		__ath10k_sdio_write_async(ar, req);
1474 		spin_lock_bh(&ar_sdio->wr_async_lock);
1475 	}
1476 
1477 	spin_unlock_bh(&ar_sdio->wr_async_lock);
1478 
1479 	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
1480 		ath10k_sdio_set_mbox_sleep(ar, true);
1481 }
1482 
1483 static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
1484 				      struct sk_buff *skb,
1485 				      struct completion *comp,
1486 				      bool htc_msg, enum ath10k_htc_ep_id eid)
1487 {
1488 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1489 	struct ath10k_sdio_bus_request *bus_req;
1490 
1491 	/* Allocate a bus request for the message and queue it on the
1492 	 * SDIO workqueue.
1493 	 */
1494 	bus_req = ath10k_sdio_alloc_busreq(ar);
1495 	if (!bus_req) {
1496 		ath10k_warn(ar,
1497 			    "unable to allocate bus request for async request\n");
1498 		return -ENOMEM;
1499 	}
1500 
1501 	bus_req->skb = skb;
1502 	bus_req->eid = eid;
1503 	bus_req->address = addr;
1504 	bus_req->htc_msg = htc_msg;
1505 	bus_req->comp = comp;
1506 
1507 	spin_lock_bh(&ar_sdio->wr_async_lock);
1508 	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
1509 	spin_unlock_bh(&ar_sdio->wr_async_lock);
1510 
1511 	return 0;
1512 }
1513 
1514 /* IRQ handler */
1515 
1516 static void ath10k_sdio_irq_handler(struct sdio_func *func)
1517 {
1518 	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
1519 	struct ath10k *ar = ar_sdio->ar;
1520 	unsigned long timeout;
1521 	bool done = false;
1522 	int ret;
1523 
1524 	/* Release the host during interrupts so we can pick it back up when
1525 	 * we process commands.
1526 	 */
1527 	sdio_release_host(ar_sdio->func);
1528 
1529 	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
1530 	do {
1531 		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
1532 		if (ret)
1533 			break;
1534 	} while (time_before(jiffies, timeout) && !done);
1535 
1536 	ath10k_mac_tx_push_pending(ar);
1537 
1538 	sdio_claim_host(ar_sdio->func);
1539 
1540 	if (ret && ret != -ECANCELED)
1541 		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
1542 			    ret);
1543 }
1544 
1545 /* sdio HIF functions */
1546 
1547 static int ath10k_sdio_disable_intrs(struct ath10k *ar)
1548 {
1549 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1550 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1551 	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1552 	int ret;
1553 
1554 	mutex_lock(&irq_data->mtx);
1555 
1556 	memset(regs, 0, sizeof(*regs));
1557 	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1558 				&regs->int_status_en, sizeof(*regs));
1559 	if (ret)
1560 		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
1561 
1562 	mutex_unlock(&irq_data->mtx);
1563 
1564 	return ret;
1565 }
1566 
1567 static int ath10k_sdio_hif_power_up(struct ath10k *ar,
1568 				    enum ath10k_firmware_mode fw_mode)
1569 {
1570 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1571 	struct sdio_func *func = ar_sdio->func;
1572 	int ret;
1573 
1574 	if (!ar_sdio->is_disabled)
1575 		return 0;
1576 
1577 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
1578 
1579 	ret = ath10k_sdio_config(ar);
1580 	if (ret) {
1581 		ath10k_err(ar, "failed to config sdio: %d\n", ret);
1582 		return ret;
1583 	}
1584 
1585 	sdio_claim_host(func);
1586 
1587 	ret = sdio_enable_func(func);
1588 	if (ret) {
1589 		ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);
1590 		sdio_release_host(func);
1591 		return ret;
1592 	}
1593 
1594 	sdio_release_host(func);
1595 
1596 	/* Wait for hardware to initialise. It should take a lot less than
1597 	 * 20 ms but let's be conservative here.
1598 	 */
1599 	msleep(20);
1600 
1601 	ar_sdio->is_disabled = false;
1602 
1603 	ret = ath10k_sdio_disable_intrs(ar);
1604 	if (ret)
1605 		return ret;
1606 
1607 	return 0;
1608 }
1609 
1610 static void ath10k_sdio_hif_power_down(struct ath10k *ar)
1611 {
1612 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1613 	int ret;
1614 
1615 	if (ar_sdio->is_disabled)
1616 		return;
1617 
1618 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
1619 
1620 	del_timer_sync(&ar_sdio->sleep_timer);
1621 	ath10k_sdio_set_mbox_sleep(ar, true);
1622 
1623 	/* Disable the card */
1624 	sdio_claim_host(ar_sdio->func);
1625 
1626 	ret = sdio_disable_func(ar_sdio->func);
1627 	if (ret) {
1628 		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
1629 		sdio_release_host(ar_sdio->func);
1630 		return;
1631 	}
1632 
1633 	ret = mmc_hw_reset(ar_sdio->func->card->host);
1634 	if (ret)
1635 		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
1636 
1637 	sdio_release_host(ar_sdio->func);
1638 
1639 	ar_sdio->is_disabled = true;
1640 }
1641 
1642 static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1643 				 struct ath10k_hif_sg_item *items, int n_items)
1644 {
1645 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1646 	enum ath10k_htc_ep_id eid;
1647 	struct sk_buff *skb;
1648 	int ret, i;
1649 
1650 	eid = pipe_id_to_eid(pipe_id);
1651 
1652 	for (i = 0; i < n_items; i++) {
1653 		size_t padded_len;
1654 		u32 address;
1655 
1656 		skb = items[i].transfer_context;
1657 		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
1658 							      skb->len);
1659 		skb_trim(skb, padded_len);
1660 
1661 		/* Write TX data to the end of the mbox address space */
1662 		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
1663 			  skb->len;
1664 		ret = ath10k_sdio_prep_async_req(ar, address, skb,
1665 						 NULL, true, eid);
1666 		if (ret)
1667 			return ret;
1668 	}
1669 
1670 	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1671 
1672 	return 0;
1673 }
1674 
1675 static int ath10k_sdio_enable_intrs(struct ath10k *ar)
1676 {
1677 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1678 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1679 	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1680 	int ret;
1681 
1682 	mutex_lock(&irq_data->mtx);
1683 
1684 	/* Enable all but CPU interrupts */
1685 	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
1686 			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
1687 			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
1688 
1689 	/* NOTE: There are some cases where HIF can do detection of
1690 	 * pending mbox messages which is disabled now.
1691 	 */
1692 	regs->int_status_en |=
1693 		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
1694 
1695 	/* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #0
1696 	 * #0 is used for report assertion from target
1697 	 */
1698 	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
1699 
1700 	/* Set up the Error Interrupt status Register */
1701 	regs->err_int_status_en =
1702 		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
1703 		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
1704 
1705 	/* Enable Counter interrupt status register to get fatal errors for
1706 	 * debugging.
1707 	 */
1708 	regs->cntr_int_status_en =
1709 		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
1710 			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
1711 
1712 	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1713 				&regs->int_status_en, sizeof(*regs));
1714 	if (ret)
1715 		ath10k_warn(ar,
1716 			    "failed to update mbox interrupt status register : %d\n",
1717 			    ret);
1718 
1719 	mutex_unlock(&irq_data->mtx);
1720 	return ret;
1721 }
1722 
1723 /* HIF diagnostics */
1724 
1725 static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1726 				     size_t buf_len)
1727 {
1728 	int ret;
1729 	void *mem;
1730 
1731 	mem = kzalloc(buf_len, GFP_KERNEL);
1732 	if (!mem)
1733 		return -ENOMEM;
1734 
1735 	/* set window register to start read cycle */
1736 	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
1737 	if (ret) {
1738 		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
1739 		goto out;
1740 	}
1741 
1742 	/* read the data */
1743 	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
1744 	if (ret) {
1745 		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
1746 			    ret);
1747 		goto out;
1748 	}
1749 
1750 	memcpy(buf, mem, buf_len);
1751 
1752 out:
1753 	kfree(mem);
1754 
1755 	return ret;
1756 }
1757 
1758 static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
1759 				   u32 *value)
1760 {
1761 	__le32 *val;
1762 	int ret;
1763 
1764 	val = kzalloc(sizeof(*val), GFP_KERNEL);
1765 	if (!val)
1766 		return -ENOMEM;
1767 
1768 	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
1769 	if (ret)
1770 		goto out;
1771 
1772 	*value = __le32_to_cpu(*val);
1773 
1774 out:
1775 	kfree(val);
1776 
1777 	return ret;
1778 }
1779 
1780 static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
1781 					  const void *data, int nbytes)
1782 {
1783 	int ret;
1784 
1785 	/* set write data */
1786 	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
1787 	if (ret) {
1788 		ath10k_warn(ar,
1789 			    "failed to write 0x%p to mbox window data address: %d\n",
1790 			    data, ret);
1791 		return ret;
1792 	}
1793 
1794 	/* set window register, which starts the write cycle */
1795 	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
1796 	if (ret) {
1797 		ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
1798 		return ret;
1799 	}
1800 
1801 	return 0;
1802 }
1803 
1804 static int ath10k_sdio_hif_start_post(struct ath10k *ar)
1805 {
1806 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1807 	u32 addr, val;
1808 	int ret = 0;
1809 
1810 	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1811 
1812 	ret = ath10k_sdio_diag_read32(ar, addr, &val);
1813 	if (ret) {
1814 		ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
1815 		return ret;
1816 	}
1817 
1818 	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
1819 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1820 			   "sdio mailbox swap service enabled\n");
1821 		ar_sdio->swap_mbox = true;
1822 	} else {
1823 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1824 			   "sdio mailbox swap service disabled\n");
1825 		ar_sdio->swap_mbox = false;
1826 	}
1827 
1828 	ath10k_sdio_set_mbox_sleep(ar, true);
1829 
1830 	return 0;
1831 }
1832 
1833 static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
1834 {
1835 	u32 addr, val;
1836 	int ret;
1837 
1838 	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1839 
1840 	ret = ath10k_sdio_diag_read32(ar, addr, &val);
1841 	if (ret) {
1842 		ath10k_warn(ar,
1843 			    "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
1844 		return ret;
1845 	}
1846 
1847 	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
1848 
1849 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
1850 		   ret ? " " : " not ");
1851 
1852 	return ret;
1853 }
1854 
1855 /* HIF start/stop */
1856 
1857 static int ath10k_sdio_hif_start(struct ath10k *ar)
1858 {
1859 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1860 	int ret;
1861 
1862 	napi_enable(&ar->napi);
1863 
1864 	/* Sleep 20 ms before HIF interrupts are disabled.
1865 	 * This will give target plenty of time to process the BMI done
1866 	 * request before interrupts are disabled.
1867 	 */
1868 	msleep(20);
1869 	ret = ath10k_sdio_disable_intrs(ar);
1870 	if (ret)
1871 		return ret;
1872 
1873 	/* eid 0 always uses the lower part of the extended mailbox address
1874 	 * space (ext_info[0].htc_ext_addr).
1875 	 */
1876 	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1877 	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1878 
1879 	sdio_claim_host(ar_sdio->func);
1880 
1881 	/* Register the isr */
1882 	ret =  sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
1883 	if (ret) {
1884 		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
1885 		sdio_release_host(ar_sdio->func);
1886 		return ret;
1887 	}
1888 
1889 	sdio_release_host(ar_sdio->func);
1890 
1891 	ret = ath10k_sdio_enable_intrs(ar);
1892 	if (ret)
1893 		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
1894 
1895 	/* Enable sleep and then disable it again */
1896 	ret = ath10k_sdio_set_mbox_sleep(ar, true);
1897 	if (ret)
1898 		return ret;
1899 
1900 	/* Wait for 20ms for the written value to take effect */
1901 	msleep(20);
1902 
1903 	ret = ath10k_sdio_set_mbox_sleep(ar, false);
1904 	if (ret)
1905 		return ret;
1906 
1907 	return 0;
1908 }
1909 
1910 #define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
1911 
1912 static void ath10k_sdio_irq_disable(struct ath10k *ar)
1913 {
1914 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1915 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1916 	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1917 	struct sk_buff *skb;
1918 	struct completion irqs_disabled_comp;
1919 	int ret;
1920 
1921 	skb = dev_alloc_skb(sizeof(*regs));
1922 	if (!skb)
1923 		return;
1924 
1925 	mutex_lock(&irq_data->mtx);
1926 
1927 	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
1928 	memcpy(skb->data, regs, sizeof(*regs));
1929 	skb_put(skb, sizeof(*regs));
1930 
1931 	mutex_unlock(&irq_data->mtx);
1932 
1933 	init_completion(&irqs_disabled_comp);
1934 	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1935 					 skb, &irqs_disabled_comp, false, 0);
1936 	if (ret)
1937 		goto out;
1938 
1939 	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1940 
1941 	/* Wait for the completion of the IRQ disable request.
1942 	 * If there is a timeout we will try to disable irq's anyway.
1943 	 */
1944 	ret = wait_for_completion_timeout(&irqs_disabled_comp,
1945 					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
1946 	if (!ret)
1947 		ath10k_warn(ar, "sdio irq disable request timed out\n");
1948 
1949 	sdio_claim_host(ar_sdio->func);
1950 
1951 	ret = sdio_release_irq(ar_sdio->func);
1952 	if (ret)
1953 		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
1954 
1955 	sdio_release_host(ar_sdio->func);
1956 
1957 out:
1958 	kfree_skb(skb);
1959 }
1960 
1961 static void ath10k_sdio_hif_stop(struct ath10k *ar)
1962 {
1963 	struct ath10k_sdio_bus_request *req, *tmp_req;
1964 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1965 	struct sk_buff *skb;
1966 
1967 	ath10k_sdio_irq_disable(ar);
1968 
1969 	cancel_work_sync(&ar_sdio->async_work_rx);
1970 
1971 	while ((skb = skb_dequeue(&ar_sdio->rx_head)))
1972 		dev_kfree_skb_any(skb);
1973 
1974 	cancel_work_sync(&ar_sdio->wr_async_work);
1975 
1976 	spin_lock_bh(&ar_sdio->wr_async_lock);
1977 
1978 	/* Free all bus requests that have not been handled */
1979 	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1980 		struct ath10k_htc_ep *ep;
1981 
1982 		list_del(&req->list);
1983 
1984 		if (req->htc_msg) {
1985 			ep = &ar->htc.endpoint[req->eid];
1986 			ath10k_htc_notify_tx_completion(ep, req->skb);
1987 		} else if (req->skb) {
1988 			kfree_skb(req->skb);
1989 		}
1990 		ath10k_sdio_free_bus_req(ar, req);
1991 	}
1992 
1993 	spin_unlock_bh(&ar_sdio->wr_async_lock);
1994 
1995 	napi_synchronize(&ar->napi);
1996 	napi_disable(&ar->napi);
1997 }
1998 
1999 #ifdef CONFIG_PM
2000 
2001 static int ath10k_sdio_hif_suspend(struct ath10k *ar)
2002 {
2003 	return 0;
2004 }
2005 
2006 static int ath10k_sdio_hif_resume(struct ath10k *ar)
2007 {
2008 	switch (ar->state) {
2009 	case ATH10K_STATE_OFF:
2010 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
2011 			   "sdio resume configuring sdio\n");
2012 
2013 		/* need to set sdio settings after power is cut from sdio */
2014 		ath10k_sdio_config(ar);
2015 		break;
2016 
2017 	case ATH10K_STATE_ON:
2018 	default:
2019 		break;
2020 	}
2021 
2022 	return 0;
2023 }
2024 #endif
2025 
2026 static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
2027 					       u16 service_id,
2028 					       u8 *ul_pipe, u8 *dl_pipe)
2029 {
2030 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
2031 	struct ath10k_htc *htc = &ar->htc;
2032 	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
2033 	enum ath10k_htc_ep_id eid;
2034 	bool ep_found = false;
2035 	int i;
2036 
2037 	/* For sdio, we are interested in the mapping between eid
2038 	 * and pipeid rather than service_id to pipe_id.
2039 	 * First we find out which eid has been allocated to the
2040 	 * service...
2041 	 */
2042 	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
2043 		if (htc->endpoint[i].service_id == service_id) {
2044 			eid = htc->endpoint[i].eid;
2045 			ep_found = true;
2046 			break;
2047 		}
2048 	}
2049 
2050 	if (!ep_found)
2051 		return -EINVAL;
2052 
2053 	/* Then we create the simplest mapping possible between pipeid
2054 	 * and eid
2055 	 */
2056 	*ul_pipe = *dl_pipe = (u8)eid;
2057 
2058 	/* Normally, HTT will use the upper part of the extended
2059 	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
2060 	 * the lower part (ext_info[0].htc_ext_addr).
2061 	 * If fw wants swapping of mailbox addresses, the opposite is true.
2062 	 */
2063 	if (ar_sdio->swap_mbox) {
2064 		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
2065 		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
2066 		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
2067 		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
2068 	} else {
2069 		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
2070 		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
2071 		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
2072 		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
2073 	}
2074 
2075 	switch (service_id) {
2076 	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
2077 		/* HTC ctrl ep mbox address has already been setup in
2078 		 * ath10k_sdio_hif_start
2079 		 */
2080 		break;
2081 	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
2082 		ar_sdio->mbox_addr[eid] = wmi_addr;
2083 		ar_sdio->mbox_size[eid] = wmi_mbox_size;
2084 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
2085 			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
2086 			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
2087 		break;
2088 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
2089 		ar_sdio->mbox_addr[eid] = htt_addr;
2090 		ar_sdio->mbox_size[eid] = htt_mbox_size;
2091 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
2092 			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
2093 			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
2094 		break;
2095 	default:
2096 		ath10k_warn(ar, "unsupported HTC service id: %d\n",
2097 			    service_id);
2098 		return -EINVAL;
2099 	}
2100 
2101 	return 0;
2102 }
2103 
2104 static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
2105 					     u8 *ul_pipe, u8 *dl_pipe)
2106 {
2107 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
2108 
2109 	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
2110 	 * case) == 0
2111 	 */
2112 	*ul_pipe = 0;
2113 	*dl_pipe = 0;
2114 }
2115 
2116 static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
2117 	.tx_sg			= ath10k_sdio_hif_tx_sg,
2118 	.diag_read		= ath10k_sdio_hif_diag_read,
2119 	.diag_write		= ath10k_sdio_hif_diag_write_mem,
2120 	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
2121 	.start			= ath10k_sdio_hif_start,
2122 	.stop			= ath10k_sdio_hif_stop,
2123 	.start_post		= ath10k_sdio_hif_start_post,
2124 	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
2125 	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
2126 	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
2127 	.power_up		= ath10k_sdio_hif_power_up,
2128 	.power_down		= ath10k_sdio_hif_power_down,
2129 #ifdef CONFIG_PM
2130 	.suspend		= ath10k_sdio_hif_suspend,
2131 	.resume			= ath10k_sdio_hif_resume,
2132 #endif
2133 };
2134 
2135 #ifdef CONFIG_PM_SLEEP
2136 
2137 /* Empty handlers so that mmc subsystem doesn't remove us entirely during
2138  * suspend. We instead follow cfg80211 suspend/resume handlers.
2139  */
2140 static int ath10k_sdio_pm_suspend(struct device *device)
2141 {
2142 	struct sdio_func *func = dev_to_sdio_func(device);
2143 	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2144 	struct ath10k *ar = ar_sdio->ar;
2145 	mmc_pm_flag_t pm_flag, pm_caps;
2146 	int ret;
2147 
2148 	if (!device_may_wakeup(ar->dev))
2149 		return 0;
2150 
2151 	ath10k_sdio_set_mbox_sleep(ar, true);
2152 
2153 	pm_flag = MMC_PM_KEEP_POWER;
2154 
2155 	ret = sdio_set_host_pm_flags(func, pm_flag);
2156 	if (ret) {
2157 		pm_caps = sdio_get_host_pm_caps(func);
2158 		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
2159 			    pm_flag, pm_caps, ret);
2160 		return ret;
2161 	}
2162 
2163 	return ret;
2164 }
2165 
2166 static int ath10k_sdio_pm_resume(struct device *device)
2167 {
2168 	return 0;
2169 }
2170 
2171 static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
2172 			 ath10k_sdio_pm_resume);
2173 
2174 #define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
2175 
2176 #else
2177 
2178 #define ATH10K_SDIO_PM_OPS NULL
2179 
2180 #endif /* CONFIG_PM_SLEEP */
2181 
2182 static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
2183 {
2184 	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
2185 	int done;
2186 
2187 	done = ath10k_htt_rx_hl_indication(ar, budget);
2188 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
2189 
2190 	if (done < budget)
2191 		napi_complete_done(ctx, done);
2192 
2193 	return done;
2194 }
2195 
2196 static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
2197 						u32 item_offset,
2198 						u32 *val)
2199 {
2200 	u32 addr;
2201 	int ret;
2202 
2203 	addr = host_interest_item_address(item_offset);
2204 
2205 	ret = ath10k_sdio_diag_read32(ar, addr, val);
2206 
2207 	if (ret)
2208 		ath10k_warn(ar, "unable to read host interest offset %d value\n",
2209 			    item_offset);
2210 
2211 	return ret;
2212 }
2213 
2214 static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
2215 				u32 buf_len)
2216 {
2217 	u32 val;
2218 	int i, ret;
2219 
2220 	for (i = 0; i < buf_len; i += 4) {
2221 		ret = ath10k_sdio_diag_read32(ar, address + i, &val);
2222 		if (ret) {
2223 			ath10k_warn(ar, "unable to read mem %d value\n", address + i);
2224 			break;
2225 		}
2226 		memcpy(buf + i, &val, 4);
2227 	}
2228 
2229 	return ret;
2230 }
2231 
2232 static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
2233 {
2234 	u32 param;
2235 
2236 	ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
2237 
2238 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
2239 
2240 	return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
2241 }
2242 
2243 static void ath10k_sdio_dump_registers(struct ath10k *ar,
2244 				       struct ath10k_fw_crash_data *crash_data,
2245 				       bool fast_dump)
2246 {
2247 	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
2248 	int i, ret;
2249 	u32 reg_dump_area;
2250 
2251 	ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
2252 						   &reg_dump_area);
2253 	if (ret) {
2254 		ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
2255 		return;
2256 	}
2257 
2258 	if (fast_dump)
2259 		ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
2260 					     sizeof(reg_dump_values));
2261 	else
2262 		ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
2263 					   sizeof(reg_dump_values));
2264 
2265 	if (ret) {
2266 		ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
2267 		return;
2268 	}
2269 
2270 	ath10k_err(ar, "firmware register dump:\n");
2271 	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
2272 		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
2273 			   i,
2274 			   reg_dump_values[i],
2275 			   reg_dump_values[i + 1],
2276 			   reg_dump_values[i + 2],
2277 			   reg_dump_values[i + 3]);
2278 
2279 	if (!crash_data)
2280 		return;
2281 
2282 	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
2283 		crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
2284 }
2285 
2286 static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
2287 					   const struct ath10k_mem_region *mem_region,
2288 					   u8 *buf, size_t buf_len)
2289 {
2290 	const struct ath10k_mem_section *cur_section, *next_section;
2291 	unsigned int count, section_size, skip_size;
2292 	int ret, i, j;
2293 
2294 	if (!mem_region || !buf)
2295 		return 0;
2296 
2297 	cur_section = &mem_region->section_table.sections[0];
2298 
2299 	if (mem_region->start > cur_section->start) {
2300 		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
2301 			    mem_region->start, cur_section->start);
2302 		return 0;
2303 	}
2304 
2305 	skip_size = cur_section->start - mem_region->start;
2306 
2307 	/* fill the gap between the first register section and register
2308 	 * start address
2309 	 */
2310 	for (i = 0; i < skip_size; i++) {
2311 		*buf = ATH10K_MAGIC_NOT_COPIED;
2312 		buf++;
2313 	}
2314 
2315 	count = 0;
2316 	i = 0;
2317 	for (; cur_section; cur_section = next_section) {
2318 		section_size = cur_section->end - cur_section->start;
2319 
2320 		if (section_size <= 0) {
2321 			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
2322 				    cur_section->start,
2323 				    cur_section->end);
2324 			break;
2325 		}
2326 
2327 		if (++i == mem_region->section_table.size) {
2328 			/* last section */
2329 			next_section = NULL;
2330 			skip_size = 0;
2331 		} else {
2332 			next_section = cur_section + 1;
2333 
2334 			if (cur_section->end > next_section->start) {
2335 				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
2336 					    next_section->start,
2337 					    cur_section->end);
2338 				break;
2339 			}
2340 
2341 			skip_size = next_section->start - cur_section->end;
2342 		}
2343 
2344 		if (buf_len < (skip_size + section_size)) {
2345 			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
2346 			break;
2347 		}
2348 
2349 		buf_len -= skip_size + section_size;
2350 
2351 		/* read section to dest memory */
2352 		ret = ath10k_sdio_read_mem(ar, cur_section->start,
2353 					   buf, section_size);
2354 		if (ret) {
2355 			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
2356 				    cur_section->start, ret);
2357 			break;
2358 		}
2359 
2360 		buf += section_size;
2361 		count += section_size;
2362 
2363 		/* fill in the gap between this section and the next */
2364 		for (j = 0; j < skip_size; j++) {
2365 			*buf = ATH10K_MAGIC_NOT_COPIED;
2366 			buf++;
2367 		}
2368 
2369 		count += skip_size;
2370 	}
2371 
2372 	return count;
2373 }
2374 
2375 /* if an error happened returns < 0, otherwise the length */
2376 static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
2377 					   const struct ath10k_mem_region *current_region,
2378 					   u8 *buf,
2379 					   bool fast_dump)
2380 {
2381 	int ret;
2382 
2383 	if (current_region->section_table.size > 0)
2384 		/* Copy each section individually. */
2385 		return ath10k_sdio_dump_memory_section(ar,
2386 						      current_region,
2387 						      buf,
2388 						      current_region->len);
2389 
2390 	/* No individiual memory sections defined so we can
2391 	 * copy the entire memory region.
2392 	 */
2393 	if (fast_dump)
2394 		ret = ath10k_bmi_read_memory(ar,
2395 					     current_region->start,
2396 					     buf,
2397 					     current_region->len);
2398 	else
2399 		ret = ath10k_sdio_read_mem(ar,
2400 					   current_region->start,
2401 					   buf,
2402 					   current_region->len);
2403 
2404 	if (ret) {
2405 		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
2406 			    current_region->name, ret);
2407 		return ret;
2408 	}
2409 
2410 	return current_region->len;
2411 }
2412 
2413 static void ath10k_sdio_dump_memory(struct ath10k *ar,
2414 				    struct ath10k_fw_crash_data *crash_data,
2415 				    bool fast_dump)
2416 {
2417 	const struct ath10k_hw_mem_layout *mem_layout;
2418 	const struct ath10k_mem_region *current_region;
2419 	struct ath10k_dump_ram_data_hdr *hdr;
2420 	u32 count;
2421 	size_t buf_len;
2422 	int ret, i;
2423 	u8 *buf;
2424 
2425 	if (!crash_data)
2426 		return;
2427 
2428 	mem_layout = ath10k_coredump_get_mem_layout(ar);
2429 	if (!mem_layout)
2430 		return;
2431 
2432 	current_region = &mem_layout->region_table.regions[0];
2433 
2434 	buf = crash_data->ramdump_buf;
2435 	buf_len = crash_data->ramdump_buf_len;
2436 
2437 	memset(buf, 0, buf_len);
2438 
2439 	for (i = 0; i < mem_layout->region_table.size; i++) {
2440 		count = 0;
2441 
2442 		if (current_region->len > buf_len) {
2443 			ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
2444 				    current_region->name,
2445 				    current_region->len,
2446 				    buf_len);
2447 			break;
2448 		}
2449 
2450 		/* Reserve space for the header. */
2451 		hdr = (void *)buf;
2452 		buf += sizeof(*hdr);
2453 		buf_len -= sizeof(*hdr);
2454 
2455 		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
2456 						      fast_dump);
2457 		if (ret >= 0)
2458 			count = ret;
2459 
2460 		hdr->region_type = cpu_to_le32(current_region->type);
2461 		hdr->start = cpu_to_le32(current_region->start);
2462 		hdr->length = cpu_to_le32(count);
2463 
2464 		if (count == 0)
2465 			/* Note: the header remains, just with zero length. */
2466 			break;
2467 
2468 		buf += count;
2469 		buf_len -= count;
2470 
2471 		current_region++;
2472 	}
2473 }
2474 
2475 void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
2476 {
2477 	struct ath10k_fw_crash_data *crash_data;
2478 	char guid[UUID_STRING_LEN + 1];
2479 	bool fast_dump;
2480 
2481 	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
2482 
2483 	if (fast_dump)
2484 		ath10k_bmi_start(ar);
2485 
2486 	ar->stats.fw_crash_counter++;
2487 
2488 	ath10k_sdio_disable_intrs(ar);
2489 
2490 	crash_data = ath10k_coredump_new(ar);
2491 
2492 	if (crash_data)
2493 		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
2494 	else
2495 		scnprintf(guid, sizeof(guid), "n/a");
2496 
2497 	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
2498 	ath10k_print_driver_info(ar);
2499 	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
2500 	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
2501 
2502 	ath10k_sdio_enable_intrs(ar);
2503 
2504 	ath10k_core_start_recovery(ar);
2505 }
2506 
2507 static int ath10k_sdio_probe(struct sdio_func *func,
2508 			     const struct sdio_device_id *id)
2509 {
2510 	struct ath10k_sdio *ar_sdio;
2511 	struct ath10k *ar;
2512 	enum ath10k_hw_rev hw_rev;
2513 	u32 dev_id_base;
2514 	struct ath10k_bus_params bus_params = {};
2515 	int ret, i;
2516 
2517 	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
2518 	 * If there will be newer chipsets that does not use the hw reg
2519 	 * setup as defined in qca6174_regs and qca6174_values, this
2520 	 * assumption is no longer valid and hw_rev must be setup differently
2521 	 * depending on chipset.
2522 	 */
2523 	hw_rev = ATH10K_HW_QCA6174;
2524 
2525 	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
2526 				hw_rev, &ath10k_sdio_hif_ops);
2527 	if (!ar) {
2528 		dev_err(&func->dev, "failed to allocate core\n");
2529 		return -ENOMEM;
2530 	}
2531 
2532 	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
2533 		       ATH10K_NAPI_BUDGET);
2534 
2535 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
2536 		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
2537 		   func->num, func->vendor, func->device,
2538 		   func->max_blksize, func->cur_blksize);
2539 
2540 	ar_sdio = ath10k_sdio_priv(ar);
2541 
2542 	ar_sdio->irq_data.irq_proc_reg =
2543 		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
2544 			     GFP_KERNEL);
2545 	if (!ar_sdio->irq_data.irq_proc_reg) {
2546 		ret = -ENOMEM;
2547 		goto err_core_destroy;
2548 	}
2549 
2550 	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
2551 	if (!ar_sdio->vsg_buffer) {
2552 		ret = -ENOMEM;
2553 		goto err_core_destroy;
2554 	}
2555 
2556 	ar_sdio->irq_data.irq_en_reg =
2557 		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
2558 			     GFP_KERNEL);
2559 	if (!ar_sdio->irq_data.irq_en_reg) {
2560 		ret = -ENOMEM;
2561 		goto err_core_destroy;
2562 	}
2563 
2564 	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
2565 	if (!ar_sdio->bmi_buf) {
2566 		ret = -ENOMEM;
2567 		goto err_core_destroy;
2568 	}
2569 
2570 	ar_sdio->func = func;
2571 	sdio_set_drvdata(func, ar_sdio);
2572 
2573 	ar_sdio->is_disabled = true;
2574 	ar_sdio->ar = ar;
2575 
2576 	spin_lock_init(&ar_sdio->lock);
2577 	spin_lock_init(&ar_sdio->wr_async_lock);
2578 	mutex_init(&ar_sdio->irq_data.mtx);
2579 
2580 	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
2581 	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
2582 
2583 	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
2584 	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
2585 	if (!ar_sdio->workqueue) {
2586 		ret = -ENOMEM;
2587 		goto err_core_destroy;
2588 	}
2589 
2590 	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
2591 		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
2592 
2593 	skb_queue_head_init(&ar_sdio->rx_head);
2594 	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
2595 
2596 	dev_id_base = (id->device & 0x0F00);
2597 	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
2598 	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
2599 		ret = -ENODEV;
2600 		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
2601 			   dev_id_base, id->device);
2602 		goto err_free_wq;
2603 	}
2604 
2605 	ar->dev_id = QCA9377_1_0_DEVICE_ID;
2606 	ar->id.vendor = id->vendor;
2607 	ar->id.device = id->device;
2608 
2609 	ath10k_sdio_set_mbox_info(ar);
2610 
2611 	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
2612 	/* TODO: don't know yet how to get chip_id with SDIO */
2613 	bus_params.chip_id = 0;
2614 	bus_params.hl_msdu_ids = true;
2615 
2616 	ar->hw->max_mtu = ETH_DATA_LEN;
2617 
2618 	ret = ath10k_core_register(ar, &bus_params);
2619 	if (ret) {
2620 		ath10k_err(ar, "failed to register driver core: %d\n", ret);
2621 		goto err_free_wq;
2622 	}
2623 
2624 	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
2625 
2626 	return 0;
2627 
2628 err_free_wq:
2629 	destroy_workqueue(ar_sdio->workqueue);
2630 err_core_destroy:
2631 	ath10k_core_destroy(ar);
2632 
2633 	return ret;
2634 }
2635 
2636 static void ath10k_sdio_remove(struct sdio_func *func)
2637 {
2638 	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2639 	struct ath10k *ar = ar_sdio->ar;
2640 
2641 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
2642 		   "sdio removed func %d vendor 0x%x device 0x%x\n",
2643 		   func->num, func->vendor, func->device);
2644 
2645 	ath10k_core_unregister(ar);
2646 
2647 	netif_napi_del(&ar->napi);
2648 
2649 	ath10k_core_destroy(ar);
2650 
2651 	flush_workqueue(ar_sdio->workqueue);
2652 	destroy_workqueue(ar_sdio->workqueue);
2653 }
2654 
2655 static const struct sdio_device_id ath10k_sdio_devices[] = {
2656 	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
2657 	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
2658 	{},
2659 };
2660 
2661 MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2662 
2663 static struct sdio_driver ath10k_sdio_driver = {
2664 	.name = "ath10k_sdio",
2665 	.id_table = ath10k_sdio_devices,
2666 	.probe = ath10k_sdio_probe,
2667 	.remove = ath10k_sdio_remove,
2668 	.drv = {
2669 		.owner = THIS_MODULE,
2670 		.pm = ATH10K_SDIO_PM_OPS,
2671 	},
2672 };
2673 
2674 static int __init ath10k_sdio_init(void)
2675 {
2676 	int ret;
2677 
2678 	ret = sdio_register_driver(&ath10k_sdio_driver);
2679 	if (ret)
2680 		pr_err("sdio driver registration failed: %d\n", ret);
2681 
2682 	return ret;
2683 }
2684 
2685 static void __exit ath10k_sdio_exit(void)
2686 {
2687 	sdio_unregister_driver(&ath10k_sdio_driver);
2688 }
2689 
2690 module_init(ath10k_sdio_init);
2691 module_exit(ath10k_sdio_exit);
2692 
2693 MODULE_AUTHOR("Qualcomm Atheros");
2694 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
2695 MODULE_LICENSE("Dual BSD/GPL");
2696