/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _CE_H_
#define _CE_H_

#include "hif.h"

/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SEND_FLAG_GATHER 0x00010000

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

struct ath10k_ce_pipe;

#define CE_DESC_FLAGS_GATHER		(1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP		(1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK	0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB	2

struct ce_desc {
	__le32 addr;
	__le16 nbytes;
	__le16 flags; /* %CE_DESC_FLAGS_ */
};

struct ath10k_ce_ring {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space;
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* keep last */
	void *per_transfer_context[0];
};
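
/*
 * Illustrative sketch (not part of the driver): because nentries is a
 * power of 2, the sw_index/write_index invariant above lets ring occupancy
 * be derived with the CE_RING_DELTA() helper defined further below in this
 * header. The variable names are made up for illustration; only the
 * arithmetic is the point.
 *
 *	unsigned int mask = src_ring->nentries_mask;
 *
 *	// descriptors posted to HW but not yet completion-processed
 *	unsigned int pending = CE_RING_DELTA(mask, src_ring->sw_index,
 *					     src_ring->write_index);
 *
 *	// slots still available for new sends; one slot is assumed to be
 *	// kept unused so "write_index == sw_index" always means empty
 *	unsigned int free = mask - pending;
 */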

struct ath10k_ce_pipe {
	struct ath10k *ar;
	unsigned int id;

	unsigned int attr_flags;

	u32 ctrl_addr;

	void (*send_cb)(struct ath10k_ce_pipe *);
	void (*recv_cb)(struct ath10k_ce_pipe *);

	unsigned int src_sz_max;
	struct ath10k_ce_ring *src_ring;
	struct ath10k_ce_ring *dest_ring;
};

/* Copy Engine settable attributes */
struct ce_attr;

/*==================Send====================*/

/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   ce - which copy engine to use
 *   buffer - address of buffer
 *   nbytes - number of bytes to send
 *   transfer_id - arbitrary ID; reflected to destination
 *   flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
		   /* 14 bits */
		   unsigned int transfer_id,
		   unsigned int flags);

int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);

/*==================Recv=======================*/

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED	1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);
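
/*
 * Illustrative sketch (not part of the driver): a typical receive-side
 * consumer drains completions in a loop and then returns a replacement
 * buffer to the destination ring. The recycling step and the local names
 * (new_ctx, new_paddr) are assumptions for illustration; the real handlers
 * live in the PCI/HIF layer.
 *
 *	void *ctx;
 *	u32 paddr;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ath10k_ce_completed_recv_next(ce_state, &ctx, &paddr,
 *					     &nbytes, &id, &flags) == 0) {
 *		// hand (ctx, nbytes) to the upper layer, honouring
 *		// CE_RECV_FLAG_SWAPPED if it is set in flags
 *
 *		// then give the ring a fresh buffer to receive into
 *		ath10k_ce_rx_post_buf(ce_state, new_ctx, new_paddr);
 *	}
 */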

/*
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp);

int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 u32 *bufferp,
					 unsigned int *nbytesp,
					 unsigned int *transfer_idp);

/*==================CE Engine Initialization=======================*/

int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr,
			 void (*send_cb)(struct ath10k_ce_pipe *),
			 void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);

/*==================CE Engine Shutdown=======================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 u32 *bufferp,
					 unsigned int *nbytesp,
					 unsigned int *transfer_idp,
					 unsigned int *flagsp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);

/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);

/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP		1

/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA		2

/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS	4

/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR		8

/* Attributes of an instance of a Copy Engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;
};
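
/*
 * Illustrative sketch (not part of the driver): a pipe configuration is
 * just a filled-in ce_attr passed to the alloc/init calls above. The
 * values and callback names below are invented for illustration and are
 * not taken from the real ath10k pipe tables, which also split allocation
 * (probe time) and initialization (power-up) differently.
 *
 *	static const struct ce_attr example_attr = {
 *		.flags = 0,		// or a mask of the CE_ATTR_* bits above
 *		.src_nentries = 16,	// must be a power of 2
 *		.src_sz_max = 2048,	// max send size, min dest buffer size
 *		.dest_nentries = 16,	// must be a power of 2
 *	};
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, &example_attr,
 *				   example_send_cb, example_recv_cb);
 *	if (!ret)
 *		ret = ath10k_ce_init_pipe(ar, ce_id, &example_attr);
 */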

#define SR_BA_ADDRESS		0x0000
#define SR_SIZE_ADDRESS		0x0004
#define DR_BA_ADDRESS		0x0008
#define DR_SIZE_ADDRESS		0x000c
#define CE_CMD_ADDRESS		0x0018

#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_DMAX_LENGTH_MSB		15
#define CE_CTRL1_DMAX_LENGTH_LSB		0
#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)

#define CE_CTRL1_ADDRESS		0x0010
#define CE_CTRL1_HW_MASK		0x0007ffff
#define CE_CTRL1_SW_MASK		0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK		0x00000000
#define CE_CTRL1_SW_WRITE_MASK		0x0007ffff
#define CE_CTRL1_RSTMASK		0xffffffff
#define CE_CTRL1_RESET			0x00000080

#define CE_CMD_HALT_STATUS_MSB		3
#define CE_CMD_HALT_STATUS_LSB		3
#define CE_CMD_HALT_STATUS_MASK		0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET	0
#define CE_CMD_HALT_MSB			0
#define CE_CMD_HALT_MASK		0x00000001

#define HOST_IE_COPY_COMPLETE_MSB	0
#define HOST_IE_COPY_COMPLETE_LSB	0
#define HOST_IE_COPY_COMPLETE_MASK	0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET	0
#define HOST_IE_ADDRESS			0x002c

#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
#define HOST_IS_ADDRESS				0x0030

#define MISC_IE_ADDRESS			0x0034

#define MISC_IS_AXI_ERR_MASK		0x00000400

#define MISC_IS_DST_ADDR_ERR_MASK	0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK	0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK	0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK	0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK	0x00000020

#define MISC_IS_ADDRESS			0x0038

#define SR_WR_INDEX_ADDRESS		0x003c

#define DST_WR_INDEX_ADDRESS		0x0040

#define CURRENT_SRRI_ADDRESS		0x0044

#define CURRENT_DRRI_ADDRESS		0x0048
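
/*
 * Illustrative sketch (not part of the driver): the *_SET()/*_GET() macros
 * above follow a plain mask-and-shift pattern, so a register value is
 * composed by OR-ing the individual fields. The write helper named here
 * (ath10k_pci_write32) is assumed from the PCI layer, mirroring the
 * ath10k_pci_read32() call used by CE_INTERRUPT_SUMMARY() further below;
 * the real driver programs CE_CTRL1 with its own read-modify-write helpers.
 *
 *	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 *	u32 ctrl1 = CE_CTRL1_DMAX_LENGTH_SET(attr->src_sz_max) |
 *		    CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(0) |
 *		    CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(0);
 *
 *	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS, ctrl1);
 */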

#define SRC_WATERMARK_LOW_MSB	31
#define SRC_WATERMARK_LOW_LSB	16
#define SRC_WATERMARK_LOW_MASK	0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET	0
#define SRC_WATERMARK_HIGH_MSB	15
#define SRC_WATERMARK_HIGH_LSB	0
#define SRC_WATERMARK_HIGH_MASK	0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET	0
#define SRC_WATERMARK_ADDRESS	0x004c

#define DST_WATERMARK_LOW_LSB	16
#define DST_WATERMARK_LOW_MASK	0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET	0
#define DST_WATERMARK_HIGH_MSB	15
#define DST_WATERMARK_HIGH_LSB	0
#define DST_WATERMARK_HIGH_MASK	0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET	0
#define DST_WATERMARK_ADDRESS	0x0050

static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}

#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)

#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
			 MISC_IS_DST_ADDR_ERR_MASK      | \
			 MISC_IS_SRC_LEN_ERR_MASK       | \
			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
			 MISC_IS_DST_RING_OVERFLOW_MASK | \
			 MISC_IS_SRC_RING_OVERFLOW_MASK)

#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

#define CE_DEST_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

/* Ring arithmetic (modulo number of entries in ring, which is a power of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))

#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB	8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK	0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS	0x0000

#define CE_INTERRUPT_SUMMARY(ar) \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
				  CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))

#endif /* _CE_H_ */