Lines matching "+full:no +full:- +full:sdio" in drivers/net/wireless/ath/ath10k/sdio.c
1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2004-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
14 #include <linux/mmc/sdio.h>
25 #include "sdio.h"
37 return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask); in ath10k_sdio_calc_txrx_padded_len()
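/* A minimal sketch (not driver code) of what the __ALIGN_MASK() call
 * above computes: block_mask is block_size - 1, with block_size a power
 * of two, so any length gets rounded up to the next block boundary.
 */
static size_t sdio_pad_to_block(size_t len, size_t block_size)
{
	size_t mask = block_size - 1;	/* e.g. 256 - 1 = 0xff */

	return (len + mask) & ~mask;	/* e.g. 260 -> 512 */
}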
47 dev_kfree_skb(pkt->skb); in ath10k_sdio_mbox_free_rx_pkt()
48 pkt->skb = NULL; in ath10k_sdio_mbox_free_rx_pkt()
49 pkt->alloc_len = 0; in ath10k_sdio_mbox_free_rx_pkt()
50 pkt->act_len = 0; in ath10k_sdio_mbox_free_rx_pkt()
51 pkt->trailer_only = false; in ath10k_sdio_mbox_free_rx_pkt()
59 pkt->skb = dev_alloc_skb(full_len); in ath10k_sdio_mbox_alloc_rx_pkt()
60 if (!pkt->skb) in ath10k_sdio_mbox_alloc_rx_pkt()
61 return -ENOMEM; in ath10k_sdio_mbox_alloc_rx_pkt()
63 pkt->act_len = act_len; in ath10k_sdio_mbox_alloc_rx_pkt()
64 pkt->alloc_len = full_len; in ath10k_sdio_mbox_alloc_rx_pkt()
65 pkt->part_of_bundle = part_of_bundle; in ath10k_sdio_mbox_alloc_rx_pkt()
66 pkt->last_in_bundle = last_in_bundle; in ath10k_sdio_mbox_alloc_rx_pkt()
67 pkt->trailer_only = false; in ath10k_sdio_mbox_alloc_rx_pkt()
76 (struct ath10k_htc_hdr *)pkt->skb->data; in is_trailer_only_msg()
77 u16 len = __le16_to_cpu(htc_hdr->len); in is_trailer_only_msg()
79 if (len == htc_hdr->trailer_len) in is_trailer_only_msg()
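/* Hedged illustration of the check above: an HTC message is laid out as
 * | htc_hdr | payload | trailer |, and hdr->len covers payload plus
 * trailer, so len == trailer_len means there is no payload at all. The
 * struct below is a stripped-down stand-in, not the driver's full
 * struct ath10k_htc_hdr.
 */
struct htc_hdr_sketch {
	u8 eid;
	u8 flags;
	__le16 len;
	u8 trailer_len;
} __packed;

static bool htc_msg_has_payload(const struct htc_hdr_sketch *hdr)
{
	return __le16_to_cpu(hdr->len) > hdr->trailer_len;
}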
85 /* sdio/mmc functions */
110 return mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath10k_sdio_func0_cmd52_wr_byte()
125 ret = mmc_wait_for_cmd(card->host, &io_cmd, 0); in ath10k_sdio_func0_cmd52_rd_byte()
135 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_config()
139 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n"); in ath10k_sdio_config()
144 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
152 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
158 func->card, in ath10k_sdio_config()
166 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
175 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
181 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
185 ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n", in ath10k_sdio_config()
191 ret = ath10k_sdio_func0_cmd52_rd_byte(func->card, in ath10k_sdio_config()
198 ret = ath10k_sdio_func0_cmd52_wr_byte(func->card, in ath10k_sdio_config()
203 func->enable_timeout = 100; in ath10k_sdio_config()
205 ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size); in ath10k_sdio_config()
207 ath10k_warn(ar, "failed to set sdio block size to %d: %d\n", in ath10k_sdio_config()
208 ar_sdio->mbox_info.block_size, ret); in ath10k_sdio_config()
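/* Sketch of the mmc-core calling convention followed here: block size
 * changes (and other function accesses) must happen with the host
 * claimed; 256 is only an example value, the driver passes
 * mbox_info.block_size.
 */
sdio_claim_host(func);
ret = sdio_set_block_size(func, 256);
sdio_release_host(func);
if (ret)
	dev_warn(&func->dev, "block size not accepted: %d\n", ret);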
220 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_write32()
232 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n", in ath10k_sdio_write32()
244 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_writesb32()
250 return -ENOMEM; in ath10k_sdio_writesb32()
263 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n", in ath10k_sdio_writesb32()
277 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_read32()
288 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n", in ath10k_sdio_read32()
300 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_read()
312 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_read()
314 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len); in ath10k_sdio_read()
325 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_write()
340 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_write()
342 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len); in ath10k_sdio_write()
353 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_readsb()
358 len = round_down(len, ar_sdio->mbox_info.block_size); in ath10k_sdio_readsb()
367 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n", in ath10k_sdio_readsb()
369 ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len); in ath10k_sdio_readsb()
384 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_mbox_rx_process_packet()
385 struct sk_buff *skb = pkt->skb; in ath10k_sdio_mbox_rx_process_packet()
386 struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data; in ath10k_sdio_mbox_rx_process_packet()
387 bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT; in ath10k_sdio_mbox_rx_process_packet()
393 trailer = skb->data + skb->len - htc_hdr->trailer_len; in ath10k_sdio_mbox_rx_process_packet()
395 eid = pipe_id_to_eid(htc_hdr->eid); in ath10k_sdio_mbox_rx_process_packet()
399 htc_hdr->trailer_len, in ath10k_sdio_mbox_rx_process_packet()
407 pkt->trailer_only = true; in ath10k_sdio_mbox_rx_process_packet()
409 skb_trim(skb, skb->len - htc_hdr->trailer_len); in ath10k_sdio_mbox_rx_process_packet()
422 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_mbox_rx_process_packets()
431 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { in ath10k_sdio_mbox_rx_process_packets()
436 &lookaheads[lookahead_idx++])->eid; in ath10k_sdio_mbox_rx_process_packets()
439 ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", in ath10k_sdio_mbox_rx_process_packets()
441 ret = -EINVAL; in ath10k_sdio_mbox_rx_process_packets()
445 ep = &htc->endpoint[id]; in ath10k_sdio_mbox_rx_process_packets()
447 if (ep->service_id == 0) { in ath10k_sdio_mbox_rx_process_packets()
449 ret = -EINVAL; in ath10k_sdio_mbox_rx_process_packets()
453 pkt = &ar_sdio->rx_pkts[i]; in ath10k_sdio_mbox_rx_process_packets()
455 if (pkt->part_of_bundle && !pkt->last_in_bundle) { in ath10k_sdio_mbox_rx_process_packets()
459 lookahead_idx--; in ath10k_sdio_mbox_rx_process_packets()
471 if (!pkt->trailer_only) { in ath10k_sdio_mbox_rx_process_packets()
472 cb = ATH10K_SKB_RXCB(pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
473 cb->eid = id; in ath10k_sdio_mbox_rx_process_packets()
475 skb_queue_tail(&ar_sdio->rx_head, pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
476 queue_work(ar->workqueue_aux, in ath10k_sdio_mbox_rx_process_packets()
477 &ar_sdio->async_work_rx); in ath10k_sdio_mbox_rx_process_packets()
479 kfree_skb(pkt->skb); in ath10k_sdio_mbox_rx_process_packets()
483 pkt->skb = NULL; in ath10k_sdio_mbox_rx_process_packets()
484 pkt->alloc_len = 0; in ath10k_sdio_mbox_rx_process_packets()
493 for (; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_process_packets()
494 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_process_packets()
506 u8 max_msgs = ar->htc.max_msgs_per_htc_bundle; in ath10k_sdio_mbox_alloc_bundle()
508 *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags); in ath10k_sdio_mbox_alloc_bundle()
513 le16_to_cpu(htc_hdr->len), in ath10k_sdio_mbox_alloc_bundle()
515 return -ENOMEM; in ath10k_sdio_mbox_alloc_bundle()
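/* Hedged sketch of the decoding behind ath10k_htc_get_bundle_count(): in
 * the classic HTC encoding the count of extra bundled messages sits in
 * the upper nibble of the flags byte; newer firmware extends the
 * encoding, so treat the mask and shift below as illustrative only.
 */
static u8 htc_flags_bundle_count(u8 flags)
{
	return (flags >> 4) & 0x0f;	/* 0 means no additional messages */
}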
550 ret = -ENOMEM; in ath10k_sdio_mbox_rx_alloc()
558 if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) { in ath10k_sdio_mbox_rx_alloc()
560 le16_to_cpu(htc_hdr->len), in ath10k_sdio_mbox_rx_alloc()
562 ret = -ENOMEM; in ath10k_sdio_mbox_rx_alloc()
570 act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_alloc()
575 htc_hdr->eid, htc_hdr->flags, in ath10k_sdio_mbox_rx_alloc()
576 le16_to_cpu(htc_hdr->len)); in ath10k_sdio_mbox_rx_alloc()
577 ret = -EINVAL; in ath10k_sdio_mbox_rx_alloc()
582 ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) { in ath10k_sdio_mbox_rx_alloc()
590 &ar_sdio->rx_pkts[pkt_cnt], in ath10k_sdio_mbox_rx_alloc()
612 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK) in ath10k_sdio_mbox_rx_alloc()
615 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt], in ath10k_sdio_mbox_rx_alloc()
628 ar_sdio->n_rx_pkts = pkt_cnt; in ath10k_sdio_mbox_rx_alloc()
634 if (!ar_sdio->rx_pkts[i].alloc_len) in ath10k_sdio_mbox_rx_alloc()
636 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_alloc()
645 struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0]; in ath10k_sdio_mbox_rx_fetch()
646 struct sk_buff *skb = pkt->skb; in ath10k_sdio_mbox_rx_fetch()
650 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, in ath10k_sdio_mbox_rx_fetch()
651 skb->data, pkt->alloc_len); in ath10k_sdio_mbox_rx_fetch()
655 htc_hdr = (struct ath10k_htc_hdr *)skb->data; in ath10k_sdio_mbox_rx_fetch()
656 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_fetch()
658 if (pkt->act_len > pkt->alloc_len) { in ath10k_sdio_mbox_rx_fetch()
659 ret = -EINVAL; in ath10k_sdio_mbox_rx_fetch()
663 skb_put(skb, pkt->act_len); in ath10k_sdio_mbox_rx_fetch()
667 ar_sdio->n_rx_pkts = 0; in ath10k_sdio_mbox_rx_fetch()
682 for (i = 0; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_fetch_bundle()
683 virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len; in ath10k_sdio_mbox_rx_fetch_bundle()
686 ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len); in ath10k_sdio_mbox_rx_fetch_bundle()
687 ret = -E2BIG; in ath10k_sdio_mbox_rx_fetch_bundle()
691 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr, in ath10k_sdio_mbox_rx_fetch_bundle()
692 ar_sdio->vsg_buffer, virt_pkt_len); in ath10k_sdio_mbox_rx_fetch_bundle()
699 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { in ath10k_sdio_mbox_rx_fetch_bundle()
700 pkt = &ar_sdio->rx_pkts[i]; in ath10k_sdio_mbox_rx_fetch_bundle()
701 htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset); in ath10k_sdio_mbox_rx_fetch_bundle()
702 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr); in ath10k_sdio_mbox_rx_fetch_bundle()
704 if (pkt->act_len > pkt->alloc_len) { in ath10k_sdio_mbox_rx_fetch_bundle()
705 ret = -EINVAL; in ath10k_sdio_mbox_rx_fetch_bundle()
709 skb_put_data(pkt->skb, htc_hdr, pkt->act_len); in ath10k_sdio_mbox_rx_fetch_bundle()
710 pkt_offset += pkt->alloc_len; in ath10k_sdio_mbox_rx_fetch_bundle()
717 for (i = 0; i < ar_sdio->n_rx_pkts; i++) in ath10k_sdio_mbox_rx_fetch_bundle()
718 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]); in ath10k_sdio_mbox_rx_fetch_bundle()
720 ar_sdio->n_rx_pkts = 0; in ath10k_sdio_mbox_rx_fetch_bundle()
725 /* This is the timeout for mailbox processing done in the sdio irq
726 * handler. The timeout is deliberately set quite high since SDIO dump logs
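/* The (elided) loop guarded by that timeout presumably follows the
 * usual jiffies idiom; process_pending() and TIMEOUT_HZ below are
 * placeholder names, not the driver's.
 */
unsigned long timeout = jiffies + TIMEOUT_HZ;
bool done = false;
int ret;

do {
	ret = process_pending(ar, &done);
} while (!ret && !done && time_before(jiffies, timeout));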
758 if (ar_sdio->n_rx_pkts >= 2) in ath10k_sdio_mbox_rxmsg_pending_handler()
760 * re-check again. in ath10k_sdio_mbox_rxmsg_pending_handler()
764 if (ar_sdio->n_rx_pkts > 1) in ath10k_sdio_mbox_rxmsg_pending_handler()
783 * flag that we should re-check IRQ status registers again in ath10k_sdio_mbox_rxmsg_pending_handler()
790 if (ret && (ret != -ECANCELED)) in ath10k_sdio_mbox_rxmsg_pending_handler()
818 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_counter_intr()
822 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_proc_counter_intr()
823 counter_int_status = irq_data->irq_proc_reg->counter_int_status & in ath10k_sdio_mbox_proc_counter_intr()
824 irq_data->irq_en_reg->cntr_int_status_en; in ath10k_sdio_mbox_proc_counter_intr()
835 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_proc_counter_intr()
843 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_err_intr()
847 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n"); in ath10k_sdio_mbox_proc_err_intr()
849 error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F; in ath10k_sdio_mbox_proc_err_intr()
853 return -EIO; in ath10k_sdio_mbox_proc_err_intr()
857 "sdio error_int_status 0x%x\n", error_int_status); in ath10k_sdio_mbox_proc_err_intr()
861 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n"); in ath10k_sdio_mbox_proc_err_intr()
872 irq_data->irq_proc_reg->error_int_status &= ~error_int_status; in ath10k_sdio_mbox_proc_err_intr()
889 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_proc_cpu_intr()
893 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_proc_cpu_intr()
894 cpu_int_status = irq_data->irq_proc_reg->cpu_int_status & in ath10k_sdio_mbox_proc_cpu_intr()
895 irq_data->irq_en_reg->cpu_int_status_en; in ath10k_sdio_mbox_proc_cpu_intr()
898 ret = -EIO; in ath10k_sdio_mbox_proc_cpu_intr()
903 irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status; in ath10k_sdio_mbox_proc_cpu_intr()
906 * this is done to make the access 4-byte aligned to mitigate issues in ath10k_sdio_mbox_proc_cpu_intr()
908 * be a multiple of 4-bytes. in ath10k_sdio_mbox_proc_cpu_intr()
921 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_proc_cpu_intr()
933 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_mbox_read_int_status()
934 struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg; in ath10k_sdio_mbox_read_int_status()
935 struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg; in ath10k_sdio_mbox_read_int_status()
939 mutex_lock(&irq_data->mtx); in ath10k_sdio_mbox_read_int_status()
950 if (!irq_en_reg->int_status_en) { in ath10k_sdio_mbox_read_int_status()
969 *host_int_status = irq_proc_reg->host_int_status & in ath10k_sdio_mbox_read_int_status()
970 irq_en_reg->int_status_en; in ath10k_sdio_mbox_read_int_status()
983 if (irq_proc_reg->rx_lookahead_valid & htc_mbox) { in ath10k_sdio_mbox_read_int_status()
985 irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]); in ath10k_sdio_mbox_read_int_status()
987 ath10k_warn(ar, "sdio mbox lookahead is zero\n"); in ath10k_sdio_mbox_read_int_status()
991 mutex_unlock(&irq_data->mtx); in ath10k_sdio_mbox_read_int_status()
1024 "sdio pending mailbox msg lookahead 0x%08x\n", in ath10k_sdio_mbox_proc_pending_irqs()
1036 "sdio host_int_status 0x%x\n", host_int_status); in ath10k_sdio_mbox_proc_pending_irqs()
1060 * unnecessarily which can re-wake the target, if upper layers in ath10k_sdio_mbox_proc_pending_irqs()
1061 * determine that we are in a low-throughput mode, we can rely on in ath10k_sdio_mbox_proc_pending_irqs()
1062 * taking another interrupt rather than re-checking the status in ath10k_sdio_mbox_proc_pending_irqs()
1063 * registers which can re-wake the target. in ath10k_sdio_mbox_proc_pending_irqs()
1072 "sdio pending irqs done %d status %d", in ath10k_sdio_mbox_proc_pending_irqs()
1081 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; in ath10k_sdio_set_mbox_info()
1082 u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev; in ath10k_sdio_set_mbox_info()
1084 mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1085 mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE; in ath10k_sdio_set_mbox_info()
1086 mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1; in ath10k_sdio_set_mbox_info()
1087 mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1088 mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH; in ath10k_sdio_set_mbox_info()
1090 mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR; in ath10k_sdio_set_mbox_info()
1097 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1103 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1107 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1111 mbox_info->ext_info[0].htc_ext_sz = in ath10k_sdio_set_mbox_info()
1115 mbox_info->ext_info[1].htc_ext_addr = in ath10k_sdio_set_mbox_info()
1116 mbox_info->ext_info[0].htc_ext_addr + in ath10k_sdio_set_mbox_info()
1117 mbox_info->ext_info[0].htc_ext_sz + in ath10k_sdio_set_mbox_info()
1119 mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH; in ath10k_sdio_set_mbox_info()
1136 /* Hit the credit counter with a 4-byte access, the first byte in ath10k_sdio_bmi_credits()
1138 * remaining 3 bytes have no effect. The rationale behind this in ath10k_sdio_bmi_credits()
1139 * is to make all HIF accesses 4-byte aligned. in ath10k_sdio_bmi_credits()
1157 return -ETIMEDOUT; in ath10k_sdio_bmi_credits()
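/* Sketch of the 4-byte-aligned access described in the comment above: a
 * full 32-bit read where only the lowest byte carries the credit
 * decrement. CREDIT_DEC_ADDRESS is a placeholder, not the driver's
 * register macro.
 */
u32 word;

ret = ath10k_sdio_read32(ar, CREDIT_DEC_ADDRESS, &word);
if (!ret)
	cmd_credits += word & 0xff;	/* upper three bytes have no effect */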
1187 return -EINVAL; in ath10k_sdio_bmi_get_rx_lookahead()
1206 addr = ar_sdio->mbox_info.htc_addr; in ath10k_sdio_bmi_exchange_msg()
1208 memcpy(ar_sdio->bmi_buf, req, req_len); in ath10k_sdio_bmi_exchange_msg()
1209 ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len); in ath10k_sdio_bmi_exchange_msg()
1219 /* No response expected */ in ath10k_sdio_bmi_exchange_msg()
1229 * In particular, this avoids SDIO timeouts and possibly garbage in ath10k_sdio_bmi_exchange_msg()
1231 * such as Compact Flash (as well as some SDIO masters) which in ath10k_sdio_bmi_exchange_msg()
1241 * not occur in practice -- they're supported for debug/development. in ath10k_sdio_bmi_exchange_msg()
1262 * If BMI_EXECUTE ever needs to support longer-latency execution, in ath10k_sdio_bmi_exchange_msg()
1272 addr = ar_sdio->mbox_info.htc_addr; in ath10k_sdio_bmi_exchange_msg()
1273 ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len); in ath10k_sdio_bmi_exchange_msg()
1281 memcpy(resp, ar_sdio->bmi_buf, *resp_len); in ath10k_sdio_bmi_exchange_msg()
1286 /* sdio async handling functions */
1294 spin_lock_bh(&ar_sdio->lock); in ath10k_sdio_alloc_busreq()
1296 if (list_empty(&ar_sdio->bus_req_freeq)) { in ath10k_sdio_alloc_busreq()
1301 bus_req = list_first_entry(&ar_sdio->bus_req_freeq, in ath10k_sdio_alloc_busreq()
1303 list_del(&bus_req->list); in ath10k_sdio_alloc_busreq()
1306 spin_unlock_bh(&ar_sdio->lock); in ath10k_sdio_alloc_busreq()
1317 spin_lock_bh(&ar_sdio->lock); in ath10k_sdio_free_bus_req()
1318 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); in ath10k_sdio_free_bus_req()
1319 spin_unlock_bh(&ar_sdio->lock); in ath10k_sdio_free_bus_req()
1329 skb = req->skb; in __ath10k_sdio_write_async()
1330 ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len); in __ath10k_sdio_write_async()
1333 req->address, ret); in __ath10k_sdio_write_async()
1335 if (req->htc_msg) { in __ath10k_sdio_write_async()
1336 ep = &ar->htc.endpoint[req->eid]; in __ath10k_sdio_write_async()
1338 } else if (req->comp) { in __ath10k_sdio_write_async()
1339 complete(req->comp); in __ath10k_sdio_write_async()
1346 * this way the SDIO bus is utilised much better.
1352 struct ath10k *ar = ar_sdio->ar; in ath10k_rx_indication_async_work()
1358 skb = skb_dequeue(&ar_sdio->rx_head); in ath10k_rx_indication_async_work()
1362 ep = &ar->htc.endpoint[cb->eid]; in ath10k_rx_indication_async_work()
1363 ep->ep_ops.ep_rx_complete(ar, skb); in ath10k_rx_indication_async_work()
1366 if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) { in ath10k_rx_indication_async_work()
1368 napi_schedule(&ar->napi); in ath10k_rx_indication_async_work()
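/* The deferral pattern the comment above describes, reduced to its two
 * halves (sketch; locking and error handling elided). Both calls appear
 * verbatim elsewhere in this file.
 */
/* producer, rx path: */
skb_queue_tail(&ar_sdio->rx_head, skb);
queue_work(ar->workqueue_aux, &ar_sdio->async_work_rx);

/* consumer, worker: */
while ((skb = skb_dequeue(&ar_sdio->rx_head)) != NULL)
	ep->ep_ops.ep_rx_complete(ar, skb);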
1375 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_read_rtc_state()
1379 rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret); in ath10k_sdio_read_rtc_state()
1397 sdio_claim_host(ar_sdio->func); in ath10k_sdio_set_mbox_sleep()
1408 ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE; in ath10k_sdio_set_mbox_sleep()
1411 ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE; in ath10k_sdio_set_mbox_sleep()
1430 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n", in ath10k_sdio_set_mbox_sleep()
1437 retry--; in ath10k_sdio_set_mbox_sleep()
1442 sdio_release_host(ar_sdio->func); in ath10k_sdio_set_mbox_sleep()
1451 ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE; in ath10k_sdio_sleep_timer_handler()
1452 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_sleep_timer_handler()
1459 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_write_async_work()
1461 struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info; in ath10k_sdio_write_async_work()
1463 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1465 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath10k_sdio_write_async_work()
1466 list_del(&req->list); in ath10k_sdio_write_async_work()
1467 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1469 if (req->address >= mbox_info->htc_addr && in ath10k_sdio_write_async_work()
1470 ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) { in ath10k_sdio_write_async_work()
1472 mod_timer(&ar_sdio->sleep_timer, jiffies + in ath10k_sdio_write_async_work()
1477 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1480 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_write_async_work()
1482 if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE) in ath10k_sdio_write_async_work()
1495 * SDIO workqueue. in ath10k_sdio_prep_async_req()
1501 return -ENOMEM; in ath10k_sdio_prep_async_req()
1504 bus_req->skb = skb; in ath10k_sdio_prep_async_req()
1505 bus_req->eid = eid; in ath10k_sdio_prep_async_req()
1506 bus_req->address = addr; in ath10k_sdio_prep_async_req()
1507 bus_req->htc_msg = htc_msg; in ath10k_sdio_prep_async_req()
1508 bus_req->comp = comp; in ath10k_sdio_prep_async_req()
1510 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_prep_async_req()
1511 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); in ath10k_sdio_prep_async_req()
1512 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_prep_async_req()
1522 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_irq_handler()
1530 sdio_release_host(ar_sdio->func); in ath10k_sdio_irq_handler()
1541 sdio_claim_host(ar_sdio->func); in ath10k_sdio_irq_handler()
1543 if (ret && ret != -ECANCELED) in ath10k_sdio_irq_handler()
1544 ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n", in ath10k_sdio_irq_handler()
1548 /* sdio HIF functions */
1553 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_disable_intrs()
1554 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_disable_intrs()
1557 mutex_lock(&irq_data->mtx); in ath10k_sdio_disable_intrs()
1561 &regs->int_status_en, sizeof(*regs)); in ath10k_sdio_disable_intrs()
1563 ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret); in ath10k_sdio_disable_intrs()
1565 mutex_unlock(&irq_data->mtx); in ath10k_sdio_disable_intrs()
1574 struct sdio_func *func = ar_sdio->func; in ath10k_sdio_hif_power_up()
1577 if (!ar_sdio->is_disabled) in ath10k_sdio_hif_power_up()
1580 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n"); in ath10k_sdio_hif_power_up()
1584 ath10k_err(ar, "failed to config sdio: %d\n", ret); in ath10k_sdio_hif_power_up()
1592 ath10k_warn(ar, "unable to enable sdio function: %d\n", ret); in ath10k_sdio_hif_power_up()
1604 ar_sdio->is_disabled = false; in ath10k_sdio_hif_power_up()
1618 if (ar_sdio->is_disabled) in ath10k_sdio_hif_power_down()
1621 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); in ath10k_sdio_hif_power_down()
1623 del_timer_sync(&ar_sdio->sleep_timer); in ath10k_sdio_hif_power_down()
1627 sdio_claim_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1629 ret = sdio_disable_func(ar_sdio->func); in ath10k_sdio_hif_power_down()
1631 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); in ath10k_sdio_hif_power_down()
1632 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1636 ret = mmc_hw_reset(ar_sdio->func->card); in ath10k_sdio_hif_power_down()
1638 ath10k_warn(ar, "unable to reset sdio: %d\n", ret); in ath10k_sdio_hif_power_down()
1640 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_power_down()
1642 ar_sdio->is_disabled = true; in ath10k_sdio_hif_power_down()
1661 skb->len); in ath10k_sdio_hif_tx_sg()
1665 address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] - in ath10k_sdio_hif_tx_sg()
1666 skb->len; in ath10k_sdio_hif_tx_sg()
1673 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_hif_tx_sg()
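/* The address math above positions the write so its final byte lands on
 * the last address of the endpoint's mailbox window, which in the
 * AR6k-style mailbox design is what signals end-of-message to the
 * target. Expanded sketch:
 */
u32 mbox_end = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid];
u32 address = mbox_end - skb->len;	/* transfer ends exactly at mbox_end */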
1681 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_enable_intrs()
1682 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_enable_intrs()
1685 mutex_lock(&irq_data->mtx); in ath10k_sdio_enable_intrs()
1688 regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) | in ath10k_sdio_enable_intrs()
1695 regs->int_status_en |= in ath10k_sdio_enable_intrs()
1701 regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1); in ath10k_sdio_enable_intrs()
1704 regs->err_int_status_en = in ath10k_sdio_enable_intrs()
1711 regs->cntr_int_status_en = in ath10k_sdio_enable_intrs()
1716 &regs->int_status_en, sizeof(*regs)); in ath10k_sdio_enable_intrs()
1722 mutex_unlock(&irq_data->mtx); in ath10k_sdio_enable_intrs()
1736 return -ENOMEM; in ath10k_sdio_hif_diag_read()
1769 return -ENOMEM; in ath10k_sdio_diag_read32()
1823 "sdio mailbox swap service enabled\n"); in ath10k_sdio_hif_start_post()
1824 ar_sdio->swap_mbox = true; in ath10k_sdio_hif_start_post()
1827 "sdio mailbox swap service disabled\n"); in ath10k_sdio_hif_start_post()
1828 ar_sdio->swap_mbox = false; in ath10k_sdio_hif_start_post()
1852 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n", in ath10k_sdio_get_htt_tx_complete()
1879 ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_start()
1880 ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_start()
1882 sdio_claim_host(ar_sdio->func); in ath10k_sdio_hif_start()
1885 ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler); in ath10k_sdio_hif_start()
1887 ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret); in ath10k_sdio_hif_start()
1888 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_start()
1892 sdio_release_host(ar_sdio->func); in ath10k_sdio_hif_start()
1896 ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret); in ath10k_sdio_hif_start()
1918 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data; in ath10k_sdio_irq_disable()
1919 struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg; in ath10k_sdio_irq_disable()
1928 mutex_lock(&irq_data->mtx); in ath10k_sdio_irq_disable()
1931 memcpy(skb->data, regs, sizeof(*regs)); in ath10k_sdio_irq_disable()
1934 mutex_unlock(&irq_data->mtx); in ath10k_sdio_irq_disable()
1942 queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work); in ath10k_sdio_irq_disable()
1950 ath10k_warn(ar, "sdio irq disable request timed out\n"); in ath10k_sdio_irq_disable()
1952 sdio_claim_host(ar_sdio->func); in ath10k_sdio_irq_disable()
1954 ret = sdio_release_irq(ar_sdio->func); in ath10k_sdio_irq_disable()
1956 ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret); in ath10k_sdio_irq_disable()
1958 sdio_release_host(ar_sdio->func); in ath10k_sdio_irq_disable()
1972 cancel_work_sync(&ar_sdio->async_work_rx); in ath10k_sdio_hif_stop()
1974 while ((skb = skb_dequeue(&ar_sdio->rx_head))) in ath10k_sdio_hif_stop()
1977 cancel_work_sync(&ar_sdio->wr_async_work); in ath10k_sdio_hif_stop()
1979 spin_lock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_hif_stop()
1982 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { in ath10k_sdio_hif_stop()
1985 list_del(&req->list); in ath10k_sdio_hif_stop()
1987 if (req->htc_msg) { in ath10k_sdio_hif_stop()
1988 ep = &ar->htc.endpoint[req->eid]; in ath10k_sdio_hif_stop()
1989 ath10k_htc_notify_tx_completion(ep, req->skb); in ath10k_sdio_hif_stop()
1990 } else if (req->skb) { in ath10k_sdio_hif_stop()
1991 kfree_skb(req->skb); in ath10k_sdio_hif_stop()
1996 spin_unlock_bh(&ar_sdio->wr_async_lock); in ath10k_sdio_hif_stop()
2010 switch (ar->state) { in ath10k_sdio_hif_resume()
2013 "sdio resume configuring sdio\n"); in ath10k_sdio_hif_resume()
2015 /* need to set sdio settings after power is cut from sdio */ in ath10k_sdio_hif_resume()
2033 struct ath10k_htc *htc = &ar->htc; in ath10k_sdio_hif_map_service_to_pipe()
2039 /* For sdio, we are interested in the mapping between eid in ath10k_sdio_hif_map_service_to_pipe()
2045 if (htc->endpoint[i].service_id == service_id) { in ath10k_sdio_hif_map_service_to_pipe()
2046 eid = htc->endpoint[i].eid; in ath10k_sdio_hif_map_service_to_pipe()
2053 return -EINVAL; in ath10k_sdio_hif_map_service_to_pipe()
2065 if (ar_sdio->swap_mbox) { in ath10k_sdio_hif_map_service_to_pipe()
2066 htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2067 wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2068 htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2069 wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2071 htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2072 wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr; in ath10k_sdio_hif_map_service_to_pipe()
2073 htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2074 wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz; in ath10k_sdio_hif_map_service_to_pipe()
2084 ar_sdio->mbox_addr[eid] = wmi_addr; in ath10k_sdio_hif_map_service_to_pipe()
2085 ar_sdio->mbox_size[eid] = wmi_mbox_size; in ath10k_sdio_hif_map_service_to_pipe()
2087 "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n", in ath10k_sdio_hif_map_service_to_pipe()
2088 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); in ath10k_sdio_hif_map_service_to_pipe()
2091 ar_sdio->mbox_addr[eid] = htt_addr; in ath10k_sdio_hif_map_service_to_pipe()
2092 ar_sdio->mbox_size[eid] = htt_mbox_size; in ath10k_sdio_hif_map_service_to_pipe()
2094 "sdio htt data mbox_addr 0x%x mbox_size %d\n", in ath10k_sdio_hif_map_service_to_pipe()
2095 ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]); in ath10k_sdio_hif_map_service_to_pipe()
2100 return -EINVAL; in ath10k_sdio_hif_map_service_to_pipe()
2109 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n"); in ath10k_sdio_hif_get_default_pipe()
2146 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_pm_suspend()
2150 if (!device_may_wakeup(ar->dev)) in ath10k_sdio_pm_suspend()
2160 ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n", in ath10k_sdio_pm_suspend()
2240 ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param); in ath10k_sdio_is_fast_dump_supported()
2285 crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]); in ath10k_sdio_dump_registers()
2299 cur_section = &mem_region->section_table.sections[0]; in ath10k_sdio_dump_memory_section()
2301 if (mem_region->start > cur_section->start) { in ath10k_sdio_dump_memory_section()
2303 mem_region->start, cur_section->start); in ath10k_sdio_dump_memory_section()
2307 skip_size = cur_section->start - mem_region->start; in ath10k_sdio_dump_memory_section()
2320 section_size = cur_section->end - cur_section->start; in ath10k_sdio_dump_memory_section()
2324 cur_section->start, in ath10k_sdio_dump_memory_section()
2325 cur_section->end); in ath10k_sdio_dump_memory_section()
2329 if (++i == mem_region->section_table.size) { in ath10k_sdio_dump_memory_section()
2336 if (cur_section->end > next_section->start) { in ath10k_sdio_dump_memory_section()
2338 next_section->start, in ath10k_sdio_dump_memory_section()
2339 cur_section->end); in ath10k_sdio_dump_memory_section()
2343 skip_size = next_section->start - cur_section->end; in ath10k_sdio_dump_memory_section()
2351 buf_len -= skip_size + section_size; in ath10k_sdio_dump_memory_section()
2354 ret = ath10k_sdio_read_mem(ar, cur_section->start, in ath10k_sdio_dump_memory_section()
2358 cur_section->start, ret); in ath10k_sdio_dump_memory_section()
2385 if (current_region->section_table.size > 0) in ath10k_sdio_dump_memory_generic()
2390 current_region->len); in ath10k_sdio_dump_memory_generic()
2392 /* No individual memory sections defined so we can in ath10k_sdio_dump_memory_generic()
2397 current_region->start, in ath10k_sdio_dump_memory_generic()
2399 current_region->len); in ath10k_sdio_dump_memory_generic()
2402 current_region->start, in ath10k_sdio_dump_memory_generic()
2404 current_region->len); in ath10k_sdio_dump_memory_generic()
2408 current_region->name, ret); in ath10k_sdio_dump_memory_generic()
2412 return current_region->len; in ath10k_sdio_dump_memory_generic()
2434 current_region = &mem_layout->region_table.regions[0]; in ath10k_sdio_dump_memory()
2436 buf = crash_data->ramdump_buf; in ath10k_sdio_dump_memory()
2437 buf_len = crash_data->ramdump_buf_len; in ath10k_sdio_dump_memory()
2441 for (i = 0; i < mem_layout->region_table.size; i++) { in ath10k_sdio_dump_memory()
2444 if (current_region->len > buf_len) { in ath10k_sdio_dump_memory()
2446 current_region->name, in ath10k_sdio_dump_memory()
2447 current_region->len, in ath10k_sdio_dump_memory()
2455 buf_len -= sizeof(*hdr); in ath10k_sdio_dump_memory()
2462 hdr->region_type = cpu_to_le32(current_region->type); in ath10k_sdio_dump_memory()
2463 hdr->start = cpu_to_le32(current_region->start); in ath10k_sdio_dump_memory()
2464 hdr->length = cpu_to_le32(count); in ath10k_sdio_dump_memory()
2471 buf_len -= count; in ath10k_sdio_dump_memory()
2488 ar->stats.fw_crash_counter++; in ath10k_sdio_fw_crashed_dump()
2495 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); in ath10k_sdio_fw_crashed_dump()
2519 /* Assumption: All SDIO based chipsets (so far) are QCA6174 based. in ath10k_sdio_probe()
2522 * assumption is no longer valid and hw_rev must be set up differently in ath10k_sdio_probe()
2527 ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO, in ath10k_sdio_probe()
2530 dev_err(&func->dev, "failed to allocate core\n"); in ath10k_sdio_probe()
2531 return -ENOMEM; in ath10k_sdio_probe()
2534 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll); in ath10k_sdio_probe()
2537 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", in ath10k_sdio_probe()
2538 func->num, func->vendor, func->device, in ath10k_sdio_probe()
2539 func->max_blksize, func->cur_blksize); in ath10k_sdio_probe()
2543 ar_sdio->irq_data.irq_proc_reg = in ath10k_sdio_probe()
2544 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), in ath10k_sdio_probe()
2546 if (!ar_sdio->irq_data.irq_proc_reg) { in ath10k_sdio_probe()
2547 ret = -ENOMEM; in ath10k_sdio_probe()
2551 ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL); in ath10k_sdio_probe()
2552 if (!ar_sdio->vsg_buffer) { in ath10k_sdio_probe()
2553 ret = -ENOMEM; in ath10k_sdio_probe()
2557 ar_sdio->irq_data.irq_en_reg = in ath10k_sdio_probe()
2558 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), in ath10k_sdio_probe()
2560 if (!ar_sdio->irq_data.irq_en_reg) { in ath10k_sdio_probe()
2561 ret = -ENOMEM; in ath10k_sdio_probe()
2565 ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL); in ath10k_sdio_probe()
2566 if (!ar_sdio->bmi_buf) { in ath10k_sdio_probe()
2567 ret = -ENOMEM; in ath10k_sdio_probe()
2571 ar_sdio->func = func; in ath10k_sdio_probe()
2574 ar_sdio->is_disabled = true; in ath10k_sdio_probe()
2575 ar_sdio->ar = ar; in ath10k_sdio_probe()
2577 spin_lock_init(&ar_sdio->lock); in ath10k_sdio_probe()
2578 spin_lock_init(&ar_sdio->wr_async_lock); in ath10k_sdio_probe()
2579 mutex_init(&ar_sdio->irq_data.mtx); in ath10k_sdio_probe()
2581 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); in ath10k_sdio_probe()
2582 INIT_LIST_HEAD(&ar_sdio->wr_asyncq); in ath10k_sdio_probe()
2584 INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work); in ath10k_sdio_probe()
2585 ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); in ath10k_sdio_probe()
2586 if (!ar_sdio->workqueue) { in ath10k_sdio_probe()
2587 ret = -ENOMEM; in ath10k_sdio_probe()
2592 ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]); in ath10k_sdio_probe()
2594 skb_queue_head_init(&ar_sdio->rx_head); in ath10k_sdio_probe()
2595 INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work); in ath10k_sdio_probe()
2597 dev_id_base = (id->device & 0x0F00); in ath10k_sdio_probe()
2600 ret = -ENODEV; in ath10k_sdio_probe()
2602 dev_id_base, id->device); in ath10k_sdio_probe()
2606 ar->dev_id = QCA9377_1_0_DEVICE_ID; in ath10k_sdio_probe()
2607 ar->id.vendor = id->vendor; in ath10k_sdio_probe()
2608 ar->id.device = id->device; in ath10k_sdio_probe()
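/* Sketch of the device-id classification that backs the QCA6174
 * assumption above: only bits [11:8] of the SDIO device id are
 * compared, so ids differing in the low byte share one base. The
 * example value is hypothetical, not from the driver's id table.
 */
u16 device = 0x050a;			/* hypothetical id */
u16 dev_id_base = device & 0x0f00;	/* -> 0x0500 */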
2613 /* TODO: don't know yet how to get chip_id with SDIO */ in ath10k_sdio_probe()
2617 ar->hw->max_mtu = ETH_DATA_LEN; in ath10k_sdio_probe()
2625 timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0); in ath10k_sdio_probe()
2630 destroy_workqueue(ar_sdio->workqueue); in ath10k_sdio_probe()
2640 struct ath10k *ar = ar_sdio->ar; in ath10k_sdio_remove()
2643 "sdio removed func %d vendor 0x%x device 0x%x\n", in ath10k_sdio_remove()
2644 func->num, func->vendor, func->device); in ath10k_sdio_remove()
2648 netif_napi_del(&ar->napi); in ath10k_sdio_remove()
2652 destroy_workqueue(ar_sdio->workqueue); in ath10k_sdio_remove()
2661 MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2680 pr_err("sdio driver registration failed: %d\n", ret); in ath10k_sdio_init()
2694 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");