1c89105c9SRoy Pledge /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2c89105c9SRoy Pledge /*
3c89105c9SRoy Pledge  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4d31beda2SYouri Querry  * Copyright 2016-2019 NXP
5c89105c9SRoy Pledge  *
6c89105c9SRoy Pledge  */
7c89105c9SRoy Pledge #ifndef __FSL_QBMAN_PORTAL_H
8c89105c9SRoy Pledge #define __FSL_QBMAN_PORTAL_H
9c89105c9SRoy Pledge 
10c89105c9SRoy Pledge #include <soc/fsl/dpaa2-fd.h>
11c89105c9SRoy Pledge 
129d988097SYouri Querry #define QMAN_REV_4000   0x04000000
139d988097SYouri Querry #define QMAN_REV_4100   0x04010000
149d988097SYouri Querry #define QMAN_REV_4101   0x04010001
159d988097SYouri Querry #define QMAN_REV_5000   0x05000000
169d988097SYouri Querry 
179d988097SYouri Querry #define QMAN_REV_MASK   0xffff0000
189d988097SYouri Querry 
19c89105c9SRoy Pledge struct dpaa2_dq;
20c89105c9SRoy Pledge struct qbman_swp;
21c89105c9SRoy Pledge 
/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version; /* QMan hardware revision (see QMAN_REV_* above) */
};
28c89105c9SRoy Pledge 
29c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_EQRI 0x01
30c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_EQDI 0x02
31c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_DQRI 0x04
32c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_RCRI 0x08
33c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_RCDI 0x10
34c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_VDCI 0x20
35c89105c9SRoy Pledge 
/*
 * The structure for a pull dequeue descriptor.
 *
 * This mirrors a hardware command layout; do not reorder or resize fields.
 * Fields are filled in via the qbman_pull_desc_set_*() helpers below.
 */
struct qbman_pull_desc {
	u8 verb;		/* command verb */
	u8 numf;		/* number of frames, see qbman_pull_desc_set_numframes() */
	u8 tok;			/* dequeue token */
	u8 reserved;
	__le32 dq_src;		/* FQID, WQID or channel id to dequeue from */
	__le64 rsp_addr;	/* DMA address of the response storage */
	u64 rsp_addr_virt;	/* virtual address of the same storage */
	u8 padding[40];
};
47c89105c9SRoy Pledge 
/* Dequeue precedence type, used by qbman_pull_desc_set_wq/channel() */
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect intra-class scheduling */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no intra-class scheduling */
	qbman_pull_type_active_noics
};
56c89105c9SRoy Pledge 
57c89105c9SRoy Pledge /* Definitions for parsing dequeue entries */
58c89105c9SRoy Pledge #define QBMAN_RESULT_MASK      0x7f
59c89105c9SRoy Pledge #define QBMAN_RESULT_DQ        0x60
60c89105c9SRoy Pledge #define QBMAN_RESULT_FQRN      0x21
61c89105c9SRoy Pledge #define QBMAN_RESULT_FQRNI     0x22
62c89105c9SRoy Pledge #define QBMAN_RESULT_FQPN      0x24
63c89105c9SRoy Pledge #define QBMAN_RESULT_FQDAN     0x25
64c89105c9SRoy Pledge #define QBMAN_RESULT_CDAN      0x26
65c89105c9SRoy Pledge #define QBMAN_RESULT_CSCN_MEM  0x27
66c89105c9SRoy Pledge #define QBMAN_RESULT_CGCU      0x28
67c89105c9SRoy Pledge #define QBMAN_RESULT_BPSCN     0x29
68c89105c9SRoy Pledge #define QBMAN_RESULT_CSCN_WQ   0x2a
69c89105c9SRoy Pledge 
70c89105c9SRoy Pledge /* QBMan FQ management command codes */
71c89105c9SRoy Pledge #define QBMAN_FQ_SCHEDULE	0x48
72c89105c9SRoy Pledge #define QBMAN_FQ_FORCE		0x49
73c89105c9SRoy Pledge #define QBMAN_FQ_XON		0x4d
74c89105c9SRoy Pledge #define QBMAN_FQ_XOFF		0x4e
75c89105c9SRoy Pledge 
/*
 * Structure of an enqueue descriptor (hardware command layout; do not
 * reorder or resize fields). Filled in via the qbman_eq_desc_*() helpers.
 */
struct qbman_eq_desc {
	u8 verb;	/* command verb */
	u8 dca;		/* DCA (DQRR consumption acknowledgement) control */
	__le16 seqnum;
	__le16 orpid;	/* presumably order-restoration point id - confirm */
	__le16 reserved1;
	__le32 tgtid;	/* target FQID or QDID, see qbman_eq_desc_set_fq/qd() */
	__le32 tag;
	__le16 qdbin;	/* queuing destination bin */
	u8 qpri;	/* queuing destination priority */
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr; /* DMA address for the enqueue response */
};
929d988097SYouri Querry 
/* An enqueue descriptor paired with the frame descriptor to enqueue */
struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];	/* frame descriptor, stored as raw bytes */
};
97c89105c9SRoy Pledge 
/* Buffer release descriptor (hardware command layout) */
struct qbman_release_desc {
	u8 verb;	/* command verb */
	u8 reserved;
	__le16 bpid;	/* buffer pool id, see qbman_release_desc_set_bpid() */
	__le32 reserved2;
	__le64 buf[7];	/* addresses of the buffers to release (at most 7) */
};
106c89105c9SRoy Pledge 
107c89105c9SRoy Pledge /* Management command result codes */
108c89105c9SRoy Pledge #define QBMAN_MC_RSLT_OK      0xf0
109c89105c9SRoy Pledge 
110c89105c9SRoy Pledge #define CODE_CDAN_WE_EN    0x1
111c89105c9SRoy Pledge #define CODE_CDAN_WE_CTX   0x4
112c89105c9SRoy Pledge 
/* Portal data structure: per-portal runtime state */
struct qbman_swp {
	const struct qbman_swp_desc *desc; /* descriptor this portal was built from */
	void *addr_cena;	/* cache-enabled register region */
	void __iomem *addr_cinh; /* cache-inhibited register region */

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues - cached state, see qbman_swp_push_get/set() */
	u32 sdq;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;	/* next ring index to inspect */
		u32 valid_bit;	/* expected valid bit of the next entry */
		u8 dqrr_size;	/* number of entries in the DQRR ring */
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;
};
147c89105c9SRoy Pledge 
/*
 * Function pointers
 *
 * NOTE(review): these appear to be bound to revision-specific
 * implementations (see the QMAN_REV_* codes above) when the portal is
 * set up - confirm against qbman_swp_init(). The static inline wrappers
 * further down simply forward to them.
 */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);
173b46fe745SYouri Querry 
174b46fe745SYouri Querry /* Functions */
175c89105c9SRoy Pledge struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
176c89105c9SRoy Pledge void qbman_swp_finish(struct qbman_swp *p);
177c89105c9SRoy Pledge u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
178c89105c9SRoy Pledge void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
179c89105c9SRoy Pledge u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
180c89105c9SRoy Pledge void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
181c89105c9SRoy Pledge int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
182c89105c9SRoy Pledge void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
183c89105c9SRoy Pledge 
184c89105c9SRoy Pledge void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
185c89105c9SRoy Pledge void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
186c89105c9SRoy Pledge 
187c89105c9SRoy Pledge void qbman_pull_desc_clear(struct qbman_pull_desc *d);
188c89105c9SRoy Pledge void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
189c89105c9SRoy Pledge 				 struct dpaa2_dq *storage,
190c89105c9SRoy Pledge 				 dma_addr_t storage_phys,
191c89105c9SRoy Pledge 				 int stash);
192c89105c9SRoy Pledge void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
193c89105c9SRoy Pledge void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
194c89105c9SRoy Pledge void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
195c89105c9SRoy Pledge 			    enum qbman_pull_type_e dct);
196c89105c9SRoy Pledge void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
197c89105c9SRoy Pledge 				 enum qbman_pull_type_e dct);
198c89105c9SRoy Pledge 
199c89105c9SRoy Pledge void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
200c89105c9SRoy Pledge 
201c89105c9SRoy Pledge int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
202c89105c9SRoy Pledge 
203c89105c9SRoy Pledge void qbman_eq_desc_clear(struct qbman_eq_desc *d);
204c89105c9SRoy Pledge void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
205c89105c9SRoy Pledge void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
206c89105c9SRoy Pledge void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
207c89105c9SRoy Pledge void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
208c89105c9SRoy Pledge 			  u32 qd_bin, u32 qd_prio);
209c89105c9SRoy Pledge 
210c89105c9SRoy Pledge 
211c89105c9SRoy Pledge void qbman_release_desc_clear(struct qbman_release_desc *d);
212c89105c9SRoy Pledge void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
213c89105c9SRoy Pledge void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
214c89105c9SRoy Pledge 
215c89105c9SRoy Pledge int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
216c89105c9SRoy Pledge 		      unsigned int num_buffers);
217c89105c9SRoy Pledge int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
218c89105c9SRoy Pledge 			   u8 alt_fq_verb);
219c89105c9SRoy Pledge int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
220c89105c9SRoy Pledge 		       u8 we_mask, u8 cdan_en,
221c89105c9SRoy Pledge 		       u64 ctx);
222c89105c9SRoy Pledge 
223c89105c9SRoy Pledge void *qbman_swp_mc_start(struct qbman_swp *p);
224c89105c9SRoy Pledge void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
225c89105c9SRoy Pledge void *qbman_swp_mc_result(struct qbman_swp *p);
226c89105c9SRoy Pledge 
/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	/* forward to the implementation installed in qbman_swp_enqueue_ptr */
	return qbman_swp_enqueue_ptr(s, d, fd);
}
241b46fe745SYouri Querry 
/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	/* forward to the installed implementation */
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
2629d988097SYouri Querry 
/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	/* forward to the installed implementation */
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}
2819d988097SYouri Querry 
282c89105c9SRoy Pledge /**
283c89105c9SRoy Pledge  * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
284c89105c9SRoy Pledge  * @dq: the dequeue result to be checked
285c89105c9SRoy Pledge  *
286c89105c9SRoy Pledge  * DQRR entries may contain non-dequeue results, ie. notifications
287c89105c9SRoy Pledge  */
288c89105c9SRoy Pledge static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
289c89105c9SRoy Pledge {
290c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
291c89105c9SRoy Pledge }
292c89105c9SRoy Pledge 
/**
 * qbman_result_is_SCN() - check whether a dequeue result is a notification
 * @dq: the dequeue result to be checked
 *
 * Anything that is not a frame dequeue response is treated as a
 * state-change notification.
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return qbman_result_is_DQ(dq) ? 0 : 1;
}
302c89105c9SRoy Pledge 
303c89105c9SRoy Pledge /* FQ Data Availability */
304c89105c9SRoy Pledge static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
305c89105c9SRoy Pledge {
306c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
307c89105c9SRoy Pledge }
308c89105c9SRoy Pledge 
309c89105c9SRoy Pledge /* Channel Data Availability */
310c89105c9SRoy Pledge static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
311c89105c9SRoy Pledge {
312c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
313c89105c9SRoy Pledge }
314c89105c9SRoy Pledge 
315c89105c9SRoy Pledge /* Congestion State Change */
316c89105c9SRoy Pledge static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
317c89105c9SRoy Pledge {
318c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
319c89105c9SRoy Pledge }
320c89105c9SRoy Pledge 
321c89105c9SRoy Pledge /* Buffer Pool State Change */
322c89105c9SRoy Pledge static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
323c89105c9SRoy Pledge {
324c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
325c89105c9SRoy Pledge }
326c89105c9SRoy Pledge 
327c89105c9SRoy Pledge /* Congestion Group Count Update */
328c89105c9SRoy Pledge static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
329c89105c9SRoy Pledge {
330c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
331c89105c9SRoy Pledge }
332c89105c9SRoy Pledge 
333c89105c9SRoy Pledge /* Retirement */
334c89105c9SRoy Pledge static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
335c89105c9SRoy Pledge {
336c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
337c89105c9SRoy Pledge }
338c89105c9SRoy Pledge 
339c89105c9SRoy Pledge /* Retirement Immediate */
340c89105c9SRoy Pledge static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
341c89105c9SRoy Pledge {
342c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
343c89105c9SRoy Pledge }
344c89105c9SRoy Pledge 
345c89105c9SRoy Pledge  /* Park */
346c89105c9SRoy Pledge static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
347c89105c9SRoy Pledge {
348c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
349c89105c9SRoy Pledge }
350c89105c9SRoy Pledge 
/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 * @scn: the state-change notification to parse
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}
358c89105c9SRoy Pledge 
359c89105c9SRoy Pledge #define SCN_RID_MASK 0x00FFFFFF
360c89105c9SRoy Pledge 
/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 * @scn: the state-change notification to parse
 *
 * The resource id occupies the low 24 bits of the rid_tok word
 * (SCN_RID_MASK).
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}
368c89105c9SRoy Pledge 
/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 * @scn: the state-change notification to parse
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}
376c89105c9SRoy Pledge 
/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of the frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in a parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}
391c89105c9SRoy Pledge 
/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of the frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}
409c89105c9SRoy Pledge 
/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of the frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}
423c89105c9SRoy Pledge 
/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of the frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
443c89105c9SRoy Pledge 
444c89105c9SRoy Pledge /* If the user has been allocated a channel object that is going to generate
445c89105c9SRoy Pledge  * CDANs to another channel, then the qbman_swp_CDAN* functions will be
446c89105c9SRoy Pledge  * necessary.
447c89105c9SRoy Pledge  *
448c89105c9SRoy Pledge  * CDAN-enabled channels only generate a single CDAN notification, after which
449c89105c9SRoy Pledge  * they need to be reenabled before they'll generate another. The idea is
450c89105c9SRoy Pledge  * that pull dequeuing will occur in reaction to the CDAN, followed by a
451c89105c9SRoy Pledge  * reenable step. Each function generates a distinct command to hardware, so a
452c89105c9SRoy Pledge  * combination function is provided if the user wishes to modify the "context"
453c89105c9SRoy Pledge  * (which shows up in each CDAN message) each time they reenable, as a single
454c89105c9SRoy Pledge  * command to hardware.
455c89105c9SRoy Pledge  */
456c89105c9SRoy Pledge 
/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Updates only the context (CODE_CDAN_WE_CTX); the enable state is left
 * unchanged.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}
472c89105c9SRoy Pledge 
/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Updates only the enable bit (CODE_CDAN_WE_EN); the context is left
 * unchanged.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}
486c89105c9SRoy Pledge 
/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to stop generating CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}
500c89105c9SRoy Pledge 
/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context to be set in CDAN
 *
 * Combines both updates (CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX) into a single
 * command to hardware.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
517c89105c9SRoy Pledge 
518c89105c9SRoy Pledge /* Wraps up submit + poll-for-result */
519c89105c9SRoy Pledge static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
520c89105c9SRoy Pledge 					  u8 cmd_verb)
521c89105c9SRoy Pledge {
522d31beda2SYouri Querry 	int loopvar = 2000;
523c89105c9SRoy Pledge 
524c89105c9SRoy Pledge 	qbman_swp_mc_submit(swp, cmd, cmd_verb);
525c89105c9SRoy Pledge 
526c89105c9SRoy Pledge 	do {
527c89105c9SRoy Pledge 		cmd = qbman_swp_mc_result(swp);
528c89105c9SRoy Pledge 	} while (!cmd && loopvar--);
529c89105c9SRoy Pledge 
530c89105c9SRoy Pledge 	WARN_ON(!loopvar);
531c89105c9SRoy Pledge 
532c89105c9SRoy Pledge 	return cmd;
533c89105c9SRoy Pledge }
534c89105c9SRoy Pledge 
/* Query APIs */

/* Response layout of a frame queue state query (hardware layout) */
struct qbman_fq_query_np_rslt {
	u8 verb;	/* response verb */
	u8 rslt;	/* result code; QBMAN_MC_RSLT_OK on success */
	u8 st1;
	u8 st2;
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;		/* see qbman_fq_state_frame_count() */
	__le32 byte_cnt;	/* see qbman_fq_state_byte_count() */
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};
555e80081c3SRoy Pledge 
556e80081c3SRoy Pledge int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
557e80081c3SRoy Pledge 			 struct qbman_fq_query_np_rslt *r);
558e80081c3SRoy Pledge u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
559e80081c3SRoy Pledge u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
560e80081c3SRoy Pledge 
/* Response layout of a buffer pool query (hardware layout) */
struct qbman_bp_query_rslt {
	u8 verb;	/* response verb */
	u8 rslt;	/* result code; QBMAN_MC_RSLT_OK on success */
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;	/* pool fill level - see qbman_bp_info_num_free_bufs() */
	__le32 hdotr;
	__le16 swdet;
	__le16 swdxt;
	__le16 hwdet;
	__le16 hwdxt;
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr;
	__le64 bpscn_ctx;
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};
587e80081c3SRoy Pledge 
588e80081c3SRoy Pledge int qbman_bp_query(struct qbman_swp *s, u16 bpid,
589e80081c3SRoy Pledge 		   struct qbman_bp_query_rslt *r);
590e80081c3SRoy Pledge 
591e80081c3SRoy Pledge u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
592e80081c3SRoy Pledge 
/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	/* forward to the implementation installed in qbman_swp_release_ptr */
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
609b46fe745SYouri Querry 
/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	/* forward to the implementation installed in qbman_swp_pull_ptr */
	return qbman_swp_pull_ptr(s, d);
}
624b46fe745SYouri Querry 
/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	/* forward to the implementation installed in qbman_swp_dqrr_next_ptr */
	return qbman_swp_dqrr_next_ptr(s);
}
637b46fe745SYouri Querry 
638c89105c9SRoy Pledge #endif /* __FSL_QBMAN_PORTAL_H */
639