/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _CE_H_
#define _CE_H_

#include "hif.h"

#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN	8
#define CE_SEND_FLAG_GATHER	0x00010000

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

struct ath10k_ce_pipe;

#define CE_DESC_FLAGS_GATHER		(1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP		(1 << 1)

/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
#define CE_DESC_FLAGS_TGT_INT_DIS	(1 << 3)

#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb

struct ce_desc {
	__le32 addr;
	__le16 nbytes;
	__le16 flags; /* %CE_DESC_FLAGS_ */
};
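
/*
 * Illustrative sketch only, not an existing ath10k helper: shows how a
 * single source descriptor could be filled out. The target-visible fields
 * are little-endian; the low flag bits carry CE_DESC_FLAGS_GATHER /
 * CE_DESC_FLAGS_BYTE_SWAP, and the transfer id is packed into the
 * chip-specific meta-data field. The helper name is hypothetical and it
 * assumes struct ath10k (via hif.h) provides the hw_values referenced by
 * the meta-data macros above.
 */
static inline void ce_example_fill_src_desc(struct ath10k *ar,
					    struct ce_desc *desc,
					    u32 paddr,
					    unsigned int nbytes,
					    unsigned int transfer_id,
					    bool byte_swap)
{
	u16 flags = 0;

	if (byte_swap)
		flags |= CE_DESC_FLAGS_BYTE_SWAP;

	/* transfer_id is reflected to the destination via the meta-data bits */
	flags |= (transfer_id << CE_DESC_FLAGS_META_DATA_LSB) &
		 CE_DESC_FLAGS_META_DATA_MASK;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = __cpu_to_le16(nbytes);
	desc->flags = __cpu_to_le16(flags);
}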

struct ath10k_ce_ring {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For the dest ring, this is the next index to be processed
	 * by software after data has been received into it.
	 *
	 * For the src ring, this is the last descriptor that was sent
	 * and whose completion has been processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *	write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;
	/*
	 * For the src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * to avoid reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *	write index >= read index >= hw_index >= sw_index
	 *
	 * For the dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space;

	/* keep last */
	void *per_transfer_context[0];
};

struct ath10k_ce_pipe {
	struct ath10k *ar;
	unsigned int id;

	unsigned int attr_flags;

	u32 ctrl_addr;

	void (*send_cb)(struct ath10k_ce_pipe *);
	void (*recv_cb)(struct ath10k_ce_pipe *);

	unsigned int src_sz_max;
	struct ath10k_ce_ring *src_ring;
	struct ath10k_ce_ring *dest_ring;
};

/* Copy Engine settable attributes */
struct ce_attr;

/*==================Send====================*/

/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * ce - which copy engine to use
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, the CE's default data swap mode is used.
 *
 * Implementation note: pushes 1 buffer to the Source ring.
 */
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
		   /* 14 bits */
		   unsigned int transfer_id,
		   unsigned int flags);

int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);

/*==================Recv=======================*/

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED	1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops a buffer from the Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp);
/*
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from the Source ring.
 */
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp);

int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp);

/*==================CE Engine Initialization=======================*/

int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr);
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);

/*==================CE Engine Shutdown=======================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);

/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data);

/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP		1

/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA		2

/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS	4

/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR		8

/* Attributes of an instance of a Copy Engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;

	void (*send_cb)(struct ath10k_ce_pipe *);
	void (*recv_cb)(struct ath10k_ce_pipe *);
};

static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}

#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

#define CE_DEST_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

/* Ring arithmetic (modulo number of entries in ring, which is a power of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
	(((idx) + (num)) & (nentries_mask))
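
/*
 * Illustrative sketch only, not an existing ath10k helper: ties the ring
 * arithmetic above to the sw_index/write_index invariant documented in
 * struct ath10k_ce_ring. For example, with an 8-entry ring
 * (nentries_mask == 7), sw_index == 6 and write_index == 2, the ring holds
 * CE_RING_DELTA(7, 6, 2) == 4 filled entries, and the computation below
 * yields 3 free slots (one entry is effectively left unused so a full ring
 * can be told apart from an empty one).
 */
static inline unsigned int
ce_example_src_ring_free_entries(struct ath10k_ce_ring *src_ring)
{
	/* Number of source descriptors that can still be queued */
	return CE_RING_DELTA(src_ring->nentries_mask,
			     src_ring->write_index,
			     src_ring->sw_index - 1);
}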

#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
	ar->regs->ce_wrap_intr_sum_host_msi_lsb
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
	ar->regs->ce_wrap_intr_sum_host_msi_mask
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS			0x0000

#define CE_INTERRUPT_SUMMARY(ar) \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))

#endif /* _CE_H_ */