/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _CE_H_
#define _CE_H_

#include "hif.h"

/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN	8
#define CE_SENDLIST_ITEMS_MAX	12
#define CE_SEND_FLAG_GATHER	0x00010000

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

struct ce_state;

/* Copy Engine operational state */
enum ce_op_state {
	CE_UNUSED,
	CE_PAUSED,
	CE_RUNNING,
};

#define CE_DESC_FLAGS_GATHER         (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB  3

struct ce_desc {
	__le32 addr;
	__le16 nbytes;
	__le16 flags; /* %CE_DESC_FLAGS_ */
};

/* Copy Engine Ring internal state */
struct ce_ring_state {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;

	/*
	 * For the dest ring, this is the next index to be processed
	 * by software after data has been received into it.
	 *
	 * For the src ring, this is the last descriptor whose send
	 * completion has been processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *     write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;
	/*
	 * For the src ring, this is the next index not yet processed by HW.
	 * It is a cached copy of the real HW index (read index), used to
	 * avoid reading the HW index register more often than necessary.
	 * This extends the invariant:
	 *     write index >= read index >= hw_index >= sw_index
	 *
	 * For the dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space;
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	void **per_transfer_context;
};

/* Copy Engine internal state */
struct ce_state {
	struct ath10k *ar;
	unsigned int id;

	unsigned int attr_flags;

	u32 ctrl_addr;
	enum ce_op_state state;

	void (*send_cb)(struct ce_state *ce_state,
			void *per_transfer_send_context,
			u32 buffer,
			unsigned int nbytes,
			unsigned int transfer_id);
	void (*recv_cb)(struct ce_state *ce_state,
			void *per_transfer_recv_context,
			u32 buffer,
			unsigned int nbytes,
			unsigned int transfer_id,
			unsigned int flags);

	unsigned int src_sz_max;
	struct ce_ring_state *src_ring;
	struct ce_ring_state *dest_ring;
};

struct ce_sendlist_item {
	/* e.g. buffer or desc list */
	dma_addr_t data;
	union {
		/* simple buffer */
		unsigned int nbytes;
		/* Rx descriptor list */
		unsigned int ndesc;
	} u;
	/* externally-specified flags; OR-ed with internal flags */
	u32 flags;
};

struct ce_sendlist {
	unsigned int num_items;
	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};

/* Copy Engine settable attributes */
struct ce_attr;

/*==================Send====================*/

/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 *   ce_state                  - which copy engine to use
 *   per_transfer_send_context - context passed back to the caller's send_cb
 *   buffer                    - address of buffer
 *   nbytes                    - number of bytes to send
 *   transfer_id               - arbitrary ID; reflected to destination
 *   flags                     - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, the CE's default data swap mode is used.
 *
 * Implementation note: pushes 1 buffer to the Source ring
 */
int ath10k_ce_send(struct ce_state *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
		   /* 14 bits */
		   unsigned int transfer_id,
		   unsigned int flags);
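
/*
 * Usage sketch (illustrative only, not mandated by this header): queue a
 * single source buffer.  The DMA mapping of "paddr" and the "ctx" cookie
 * are assumptions made for the example; the cookie is handed back to the
 * caller's registered send_cb on completion.
 *
 *	static int example_send_one(struct ce_state *ce_state, void *ctx,
 *				    u32 paddr, unsigned int len,
 *				    unsigned int transfer_id)
 *	{
 *		return ath10k_ce_send(ce_state, ctx, paddr, len,
 *				      transfer_id, 0);
 *	}
 */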

void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb)(struct ce_state *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id),
				int disable_interrupts);

/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
				u32 buffer,
				unsigned int nbytes,
				/* OR-ed with internal flags */
				u32 flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer.
 *   ce_state        - which copy engine to use
 *   sendlist        - list of simple buffers to send using gather
 *   transfer_id     - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: pushes multiple buffers with Gather to the Source ring.
 */
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    /* 14 bits */
			    unsigned int transfer_id);
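
/*
 * Usage sketch (illustrative only): gather-send two fragments as a single
 * transfer.  The fragment addresses/lengths and the example_* naming are
 * assumptions made for the example.
 *
 *	static int example_send_two_frags(struct ce_state *ce_state, void *ctx,
 *					  u32 hdr_paddr, unsigned int hdr_len,
 *					  u32 pay_paddr, unsigned int pay_len,
 *					  unsigned int transfer_id)
 *	{
 *		struct ce_sendlist sendlist = {};
 *
 *		ath10k_ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0);
 *		ath10k_ce_sendlist_buf_add(&sendlist, pay_paddr, pay_len, 0);
 *		return ath10k_ce_sendlist_send(ce_state, ctx, &sendlist,
 *					       transfer_id);
 *	}
 */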

/*==================Recv=======================*/

/*
 * Make a buffer available to receive. The buffer must be at least as
 * large as this copy engine's minimum receive size (the src_sz_max
 * attribute).
 *   ce_state                    - which copy engine to use
 *   per_transfer_recv_context   - context passed back to caller's recv_cb
 *   buffer                      - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: pushes a buffer to the Dest ring.
 */
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
			       void *per_transfer_recv_context,
			       u32 buffer);
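
/*
 * Usage sketch (illustrative only): post one receive buffer.  The buffer
 * is assumed to be DMA-mapped already and at least src_sz_max bytes long;
 * "ctx" is whatever cookie the caller wants back from the completion or
 * revoke calls.
 *
 *	static int example_post_rx(struct ce_state *ce_state, void *ctx,
 *				   u32 paddr)
 *	{
 *		return ath10k_ce_recv_buf_enqueue(ce_state, ctx, paddr);
 *	}
 */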

void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb)(struct ce_state *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id,
						unsigned int flags));

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED	1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);
/*
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp);
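
/*
 * Usage sketch (illustrative only): drain all completed receives on a
 * pipe.  example_process_rx() is a hypothetical caller-side helper that
 * would unmap the buffer and hand the data up.
 *
 *	static void example_drain_rx(struct ce_state *ce_state)
 *	{
 *		void *ctx;
 *		u32 paddr;
 *		unsigned int nbytes, id, flags;
 *
 *		while (ath10k_ce_completed_recv_next(ce_state, &ctx, &paddr,
 *						     &nbytes, &id, &flags) == 0)
 *			example_process_rx(ctx, paddr, nbytes, flags);
 *	}
 */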

/*==================CE Engine Initialization=======================*/

/* Initialize an instance of a CE */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr);

/*==================CE Engine Shutdown=======================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers.  Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends.  Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);

void ath10k_ce_deinit(struct ce_state *ce_state);
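
/*
 * Usage sketch (illustrative only) of a clean shutdown: with target DMA
 * already stopped, revoke outstanding receive buffers, cancel pending
 * sends, then tear the CE down.  example_free_rx()/example_free_tx() are
 * hypothetical caller-side helpers that unmap and free the buffers.
 *
 *	static void example_shutdown_pipe(struct ce_state *ce_state)
 *	{
 *		void *ctx;
 *		u32 paddr;
 *		unsigned int nbytes, id;
 *
 *		while (ath10k_ce_revoke_recv_next(ce_state, &ctx, &paddr) == 0)
 *			example_free_rx(ctx, paddr);
 *
 *		while (ath10k_ce_cancel_send_next(ce_state, &ctx, &paddr,
 *						  &nbytes, &id) == 0)
 *			example_free_tx(ctx, paddr);
 *
 *		ath10k_ce_deinit(ce_state);
 *	}
 */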

/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
void ath10k_ce_disable_interrupts(struct ath10k *ar);

/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP		1

/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA		2

/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS	4

/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR		8

/* Attributes of an instance of a Copy Engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* currently not in use */
	unsigned int priority;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;

	/* Future use */
	void *reserved;
};
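
/*
 * Illustrative sketch of bringing up a pipe with these attributes.  The
 * numbers below are examples only; the hard requirements are the ones
 * stated above (ring sizes must be powers of 2, and src_sz_max bounds
 * both the maximum send size and the minimum receive buffer size).
 *
 *	static struct ce_state *example_init_pipe(struct ath10k *ar,
 *						  unsigned int ce_id)
 *	{
 *		static const struct ce_attr attr = {
 *			.flags = 0,
 *			.src_nentries = 16,
 *			.src_sz_max = 2048,
 *			.dest_nentries = 32,
 *		};
 *
 *		return ath10k_ce_init(ar, ce_id, &attr);
 *	}
 */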

/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of every fragment except the last one is filled with
 * CE_SENDLIST_ITEM_CTXT.  ath10k_ce_completed_send_next returns success
 * for each completed fragment, so upper layers can use this marker to
 * distinguish intermediate fragments from the final, caller-visible
 * send completion.
 */
#define CE_SENDLIST_ITEM_CTXT	((void *)0xcecebeef)
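
/*
 * Illustrative sketch: a send-completion consumer can skip intermediate
 * gather fragments by checking for this marker and only completing the
 * caller's context on the final fragment.  example_complete_tx() is a
 * hypothetical caller-side helper.
 *
 *	while (ath10k_ce_completed_send_next(ce_state, &ctx, &paddr,
 *					     &nbytes, &id) == 0) {
 *		if (ctx == CE_SENDLIST_ITEM_CTXT)
 *			continue;
 *		example_complete_tx(ctx, paddr, nbytes);
 *	}
 */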

#define SR_BA_ADDRESS		0x0000
#define SR_SIZE_ADDRESS		0x0004
#define DR_BA_ADDRESS		0x0008
#define DR_SIZE_ADDRESS		0x000c
#define CE_CMD_ADDRESS		0x0018

#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
	 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
	 CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_DMAX_LENGTH_MSB		15
#define CE_CTRL1_DMAX_LENGTH_LSB		0
#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)

#define CE_CTRL1_ADDRESS			0x0010
#define CE_CTRL1_HW_MASK			0x0007ffff
#define CE_CTRL1_SW_MASK			0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK			0x00000000
#define CE_CTRL1_SW_WRITE_MASK			0x0007ffff
#define CE_CTRL1_RSTMASK			0xffffffff
#define CE_CTRL1_RESET				0x00000080
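
/*
 * The *_GET/*_SET macros above follow the usual MASK/LSB convention:
 * SET(x) shifts a field value into position and GET(x) extracts it.  For
 * example (illustrative only), a CTRL1 value enabling source-ring byte
 * swap with a 2048-byte DMAX length could be composed as:
 *
 *	u32 ctrl1 = CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(1) |
 *		    CE_CTRL1_DMAX_LENGTH_SET(2048);
 */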

#define CE_CMD_HALT_STATUS_MSB			3
#define CE_CMD_HALT_STATUS_LSB			3
#define CE_CMD_HALT_STATUS_MASK			0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET		0
#define CE_CMD_HALT_MSB				0
#define CE_CMD_HALT_MASK			0x00000001

#define HOST_IE_COPY_COMPLETE_MSB		0
#define HOST_IE_COPY_COMPLETE_LSB		0
#define HOST_IE_COPY_COMPLETE_MASK		0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET		0
#define HOST_IE_ADDRESS				0x002c

#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
#define HOST_IS_ADDRESS				0x0030

#define MISC_IE_ADDRESS				0x0034

#define MISC_IS_AXI_ERR_MASK			0x00000400

#define MISC_IS_DST_ADDR_ERR_MASK		0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK		0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK		0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK		0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK		0x00000020

#define MISC_IS_ADDRESS				0x0038

#define SR_WR_INDEX_ADDRESS			0x003c

#define DST_WR_INDEX_ADDRESS			0x0040

#define CURRENT_SRRI_ADDRESS			0x0044

#define CURRENT_DRRI_ADDRESS			0x0048

#define SRC_WATERMARK_LOW_MSB			31
#define SRC_WATERMARK_LOW_LSB			16
#define SRC_WATERMARK_LOW_MASK			0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET			0
#define SRC_WATERMARK_HIGH_MSB			15
#define SRC_WATERMARK_HIGH_LSB			0
#define SRC_WATERMARK_HIGH_MASK			0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET		0
#define SRC_WATERMARK_ADDRESS			0x004c

#define DST_WATERMARK_LOW_LSB			16
#define DST_WATERMARK_LOW_MASK			0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET			0
#define DST_WATERMARK_HIGH_MSB			15
#define DST_WATERMARK_HIGH_LSB			0
#define DST_WATERMARK_HIGH_MASK			0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET		0
#define DST_WATERMARK_ADDRESS			0x0050

static inline u32 ath10k_ce_base_address(unsigned int ce_id)
{
	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
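
/*
 * Illustrative sketch: the per-CE register offsets above are relative to
 * an engine's base address, so a typical register read looks like this
 * (ath10k_pci_read32() is provided by the PCI layer, as also assumed by
 * CE_INTERRUPT_SUMMARY below):
 *
 *	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
 *	u32 src_read_index = ath10k_pci_read32(ar, ctrl_addr +
 *					       CURRENT_SRRI_ADDRESS);
 */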

#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)

#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
			 MISC_IS_DST_ADDR_ERR_MASK      | \
			 MISC_IS_SRC_LEN_ERR_MASK       | \
			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
			 MISC_IS_DST_RING_OVERFLOW_MASK | \
			 MISC_IS_SRC_RING_OVERFLOW_MASK)

#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

#define CE_DEST_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
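
/*
 * Worked example (illustrative): with an 8-entry ring, nentries_mask is
 * 0x7, so CE_RING_DELTA(0x7, 6, 1) == 3 entries outstanding across the
 * wrap and CE_RING_IDX_INCR(0x7, 7) == 0.  This masking trick is why
 * ring sizes must be powers of 2.
 */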

#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB		8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK		0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS			0x0000

#define CE_INTERRUPT_SUMMARY(ar) \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
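
/*
 * Illustrative sketch of how a top-level interrupt handler can use the
 * summary bitmap: read it once, then service only the engines whose bit
 * is set (roughly what ath10k_ce_per_engine_service_any() is expected
 * to do).
 *
 *	u32 intr_summary = CE_INTERRUPT_SUMMARY(ar);
 *	unsigned int ce_id;
 *
 *	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++)
 *		if (intr_summary & (1 << ce_id))
 *			ath10k_ce_per_engine_service(ar, ce_id);
 */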

#endif /* _CE_H_ */