/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */

#ifndef _SDIO_H_
#define _SDIO_H_

#define ATH10K_HIF_MBOX_BLOCK_SIZE              256

#define ATH10K_SDIO_MAX_BUFFER_SIZE             4096 /* unsure of this constant */

/* Mailbox address in SDIO address space */
#define ATH10K_HIF_MBOX_BASE_ADDR               0x1000
#define ATH10K_HIF_MBOX_WIDTH                   0x800

#define ATH10K_HIF_MBOX_TOT_WIDTH \
	(ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)

#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR          0x5000
#define ATH10K_HIF_MBOX0_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0     (56 * 1024)
#define ATH10K_HIF_MBOX1_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE        (2 * 1024)

#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
	(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))

#define ATH10K_HIF_MBOX_NUM_MAX                 4
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM         64

#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)

/* HTC runs over mailbox 0 */
#define ATH10K_HTC_MAILBOX                      0
#define ATH10K_HTC_MAILBOX_MASK                 BIT(ATH10K_HTC_MAILBOX)

/* GMBOX addresses */
#define ATH10K_HIF_GMBOX_BASE_ADDR              0x7000
#define ATH10K_HIF_GMBOX_WIDTH                  0x4000

/* Modified versions of the sdio.h macros.
 * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
 * macros in bitfield.h, so we define our own macros here.
 */
#define ATH10K_SDIO_DRIVE_DTSX_MASK \
	(SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)

#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B           0
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A           1
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C           2
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D           3
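/* Illustrative sketch only: because ATH10K_SDIO_DRIVE_DTSX_MASK is a plain
 * shifted mask, the drive strength field can be manipulated with the
 * bitfield.h helpers. The helper below is hypothetical and not part of the
 * driver; it assumes linux/bitfield.h and linux/mmc/sdio.h are available.
 */
#if 0	/* example, not compiled */
#include <linux/bitfield.h>
#include <linux/mmc/sdio.h>

static inline u8 ath10k_sdio_example_set_dtsx(u8 cccr_drive_strength, u8 type)
{
	/* Clear the DTSx field and insert the new drive strength type,
	 * e.g. ATH10K_SDIO_DRIVE_DTSX_TYPE_D.
	 */
	cccr_drive_strength &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
	cccr_drive_strength |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK, type);

	return cccr_drive_strength;
}
#endif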
/* SDIO CCCR register definitions */
#define CCCR_SDIO_IRQ_MODE_REG                  0xF0
#define CCCR_SDIO_IRQ_MODE_REG_SDIO3            0x16

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR   0xF2

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A      0x02
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C      0x04
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D      0x08

#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS       0xF0
#define CCCR_SDIO_ASYNC_INT_DELAY_MASK          0xC0

/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ            BIT(0)
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3      BIT(1)

#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK      0x01

/* The theoretical maximum number of RX messages that can be fetched
 * from the mbox interrupt handler in one loop is derived in the following
 * way:
 *
 * Let's assume that each packet in a bundle of the maximum bundle size
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
 * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
 *
 * In this case the driver must allocate
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
 */
#define ATH10K_SDIO_MAX_RX_MSGS \
	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)

#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL                    0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF  0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON   0x10000

struct ath10k_sdio_bus_request {
	struct list_head list;

	/* sdio address */
	u32 address;

	struct sk_buff *skb;
	enum ath10k_htc_ep_id eid;
	int status;
	/* Specifies if the current request is an HTC message.
	 * If not, the eid is not applicable and the TX completion handler
	 * associated with the endpoint will not be invoked.
	 */
	bool htc_msg;
	/* Completion that (if set) will be invoked for non-HTC requests
	 * (htc_msg == false) when the request has been processed.
	 */
	struct completion *comp;
};

struct ath10k_sdio_rx_data {
	struct sk_buff *skb;
	size_t alloc_len;
	size_t act_len;
	enum ath10k_htc_ep_id eid;
	bool part_of_bundle;
	bool last_in_bundle;
	bool trailer_only;
};

struct ath10k_sdio_irq_proc_regs {
	u8 host_int_status;
	u8 cpu_int_status;
	u8 error_int_status;
	u8 counter_int_status;
	u8 mbox_frame;
	u8 rx_lookahead_valid;
	u8 host_int_status2;
	u8 gmbox_rx_avail;
	__le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
	__le32 int_status_enable;
};

struct ath10k_sdio_irq_enable_regs {
	u8 int_status_en;
	u8 cpu_int_status_en;
	u8 err_int_status_en;
	u8 cntr_int_status_en;
};

struct ath10k_sdio_irq_data {
	/* protects irq_proc_reg and irq_en_reg below.
	 * We use a mutex here and not a spinlock since we will have the
	 * mutex locked while calling the sdio_memcpy_ functions.
	 * These functions require non-atomic context, and hence, spinlocks
	 * cannot be held while calling them.
	 */
	struct mutex mtx;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg;
};

struct ath10k_mbox_ext_info {
	u32 htc_ext_addr;
	u32 htc_ext_sz;
};

struct ath10k_mbox_info {
	u32 htc_addr;
	struct ath10k_mbox_ext_info ext_info[2];
	u32 block_size;
	u32 block_mask;
	u32 gmbox_addr;
	u32 gmbox_sz;
};

struct ath10k_sdio {
	struct sdio_func *func;

	struct ath10k_mbox_info mbox_info;
	bool swap_mbox;
	u32 mbox_addr[ATH10K_HTC_EP_COUNT];
	u32 mbox_size[ATH10K_HTC_EP_COUNT];

	/* available bus requests */
	struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
	/* free list of bus requests */
	struct list_head bus_req_freeq;

	struct sk_buff_head rx_head;

	/* protects access to bus_req_freeq */
	spinlock_t lock;

	struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
	size_t n_rx_pkts;

	struct ath10k *ar;
	struct ath10k_sdio_irq_data irq_data;

	/* temporary buffer for sdio read.
	 * It is allocated at probe time and used to receive bundled packets.
	 * Reads of bundled packets are not done in parallel, so the buffer
	 * does not need to be protected by a lock.
	 */
	u8 *vsg_buffer;

	/* temporary buffer for BMI requests */
	u8 *bmi_buf;

	bool is_disabled;

	struct workqueue_struct *workqueue;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;
	/* protects access to wr_asyncq */
	spinlock_t wr_async_lock;

	struct work_struct async_work_rx;
};

static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
{
	return (struct ath10k_sdio *)ar->drv_priv;
}

#endif
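/* Usage sketch (illustrative; ath10k_sdio_example_read32() is hypothetical,
 * while sdio_claim_host(), sdio_readl() and sdio_release_host() are the
 * standard kernel SDIO core helpers): a HIF callback would typically look up
 * its bus-private state through ath10k_sdio_priv() before touching the
 * underlying sdio_func.
 */
#if 0	/* example, not compiled */
static int ath10k_sdio_example_read32(struct ath10k *ar, u32 addr, u32 *val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	/* The host must be claimed around any access to the SDIO function. */
	sdio_claim_host(ar_sdio->func);
	*val = sdio_readl(ar_sdio->func, addr, &ret);
	sdio_release_host(ar_sdio->func);

	return ret;
}
#endif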