1c89105c9SRoy Pledge /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2c89105c9SRoy Pledge /*
3c89105c9SRoy Pledge  * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4d31beda2SYouri Querry  * Copyright 2016-2019 NXP
5c89105c9SRoy Pledge  *
6c89105c9SRoy Pledge  */
7c89105c9SRoy Pledge #ifndef __FSL_QBMAN_PORTAL_H
8c89105c9SRoy Pledge #define __FSL_QBMAN_PORTAL_H
9c89105c9SRoy Pledge 
10c89105c9SRoy Pledge #include <soc/fsl/dpaa2-fd.h>
11c89105c9SRoy Pledge 
129d988097SYouri Querry #define QMAN_REV_4000   0x04000000
139d988097SYouri Querry #define QMAN_REV_4100   0x04010000
149d988097SYouri Querry #define QMAN_REV_4101   0x04010001
159d988097SYouri Querry #define QMAN_REV_5000   0x05000000
169d988097SYouri Querry 
179d988097SYouri Querry #define QMAN_REV_MASK   0xffff0000
189d988097SYouri Querry 
19c89105c9SRoy Pledge struct dpaa2_dq;
20c89105c9SRoy Pledge struct qbman_swp;
21c89105c9SRoy Pledge 
/*
 * qbman software portal descriptor structure
 *
 * Static configuration a portal object is initialized from; see
 * qbman_swp_init().
 */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version; /* QMan block revision; compare under QMAN_REV_MASK */
	u32 qman_clk; /* QMan clock rate — units not shown here; confirm at caller */
	/* NOTE(review): name suggests QMan cycles per ns scaled by 256,
	 * presumably for coalescing-timer math — confirm in qbman-portal.c
	 */
	u32 qman_256_cycles_per_ns;
};
30c89105c9SRoy Pledge 
31c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_EQRI 0x01
32c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_EQDI 0x02
33c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_DQRI 0x04
34c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_RCRI 0x08
35c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_RCDI 0x10
36c89105c9SRoy Pledge #define QBMAN_SWP_INTERRUPT_VDCI 0x20
37c89105c9SRoy Pledge 
/*
 * the structure for pull dequeue descriptor
 *
 * Hardware command image (little-endian fields); built by the
 * qbman_pull_desc_set_*() helpers below. Do not reorder fields.
 */
struct qbman_pull_desc {
	u8 verb;	/* command verb / validity bit */
	u8 numf;	/* number of frames to dequeue */
	u8 tok;		/* token echoed back in the dequeue response */
	u8 reserved;
	__le32 dq_src;	/* FQID or channel id, per set_fq/set_wq/set_channel */
	__le64 rsp_addr; /* DMA address of the response storage, if any */
	/* software-only: virtual address matching rsp_addr; not read by HW */
	u64 rsp_addr_virt;
	u8 padding[40];	/* pad the command image to its full size */
};
49c89105c9SRoy Pledge 
/* Dequeue command type (dct) for pull dequeues from a WQ or channel */
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};
58c89105c9SRoy Pledge 
59c89105c9SRoy Pledge /* Definitions for parsing dequeue entries */
60c89105c9SRoy Pledge #define QBMAN_RESULT_MASK      0x7f
61c89105c9SRoy Pledge #define QBMAN_RESULT_DQ        0x60
62c89105c9SRoy Pledge #define QBMAN_RESULT_FQRN      0x21
63c89105c9SRoy Pledge #define QBMAN_RESULT_FQRNI     0x22
64c89105c9SRoy Pledge #define QBMAN_RESULT_FQPN      0x24
65c89105c9SRoy Pledge #define QBMAN_RESULT_FQDAN     0x25
66c89105c9SRoy Pledge #define QBMAN_RESULT_CDAN      0x26
67c89105c9SRoy Pledge #define QBMAN_RESULT_CSCN_MEM  0x27
68c89105c9SRoy Pledge #define QBMAN_RESULT_CGCU      0x28
69c89105c9SRoy Pledge #define QBMAN_RESULT_BPSCN     0x29
70c89105c9SRoy Pledge #define QBMAN_RESULT_CSCN_WQ   0x2a
71c89105c9SRoy Pledge 
72c89105c9SRoy Pledge /* QBMan FQ management command codes */
73c89105c9SRoy Pledge #define QBMAN_FQ_SCHEDULE	0x48
74c89105c9SRoy Pledge #define QBMAN_FQ_FORCE		0x49
75c89105c9SRoy Pledge #define QBMAN_FQ_XON		0x4d
76c89105c9SRoy Pledge #define QBMAN_FQ_XOFF		0x4e
77c89105c9SRoy Pledge 
/*
 * structure of enqueue descriptor
 *
 * Hardware command image (little-endian fields); built by the
 * qbman_eq_desc_*() helpers below. Do not reorder fields.
 */
struct qbman_eq_desc {
	u8 verb;	/* command verb / validity bit */
	u8 dca;		/* DQRR consumption acknowledgement control */
	__le16 seqnum;	/* order-restoration sequence number */
	__le16 orpid;	/* order-restoration point id */
	__le16 reserved1;
	__le32 tgtid;	/* target FQID or QDID (see set_fq/set_qd) */
	__le32 tag;
	__le16 qdbin;	/* queuing-destination bin (QD enqueues) */
	u8 qpri;	/* queuing priority (QD enqueues) */
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr; /* DMA address for the enqueue response, if any */
};
949d988097SYouri Querry 
/* Enqueue descriptor immediately followed by its frame-descriptor image */
struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];	/* raw frame descriptor bytes placed after the desc */
};
99c89105c9SRoy Pledge 
/*
 * buffer release descriptor
 *
 * Hardware command image for releasing buffers back to a buffer pool;
 * built by the qbman_release_desc_*() helpers below.
 */
struct qbman_release_desc {
	u8 verb;	/* command verb / validity bit */
	u8 reserved;
	__le16 bpid;	/* buffer pool to release into */
	__le32 reserved2;
	__le64 buf[7];	/* up to 7 buffer addresses per command */
};
108c89105c9SRoy Pledge 
109c89105c9SRoy Pledge /* Management command result codes */
110c89105c9SRoy Pledge #define QBMAN_MC_RSLT_OK      0xf0
111c89105c9SRoy Pledge 
112c89105c9SRoy Pledge #define CODE_CDAN_WE_EN    0x1
113c89105c9SRoy Pledge #define CODE_CDAN_WE_CTX   0x4
114c89105c9SRoy Pledge 
/*
 * portal data structure
 *
 * Runtime state of one QBMan software portal. Created by qbman_swp_init()
 * from a struct qbman_swp_desc and torn down by qbman_swp_finish().
 */
struct qbman_swp {
	const struct qbman_swp_desc *desc; /* config this portal was built from */
	void *addr_cena;	/* cache-enabled register region */
	void __iomem *addr_cinh; /* cache-inhibited register region */

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues */
	u32 sdq;	/* static dequeue command image */

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;	/* next DQRR entry to look at */
		u32 valid_bit;	/* expected validity bit at next_idx */
		u8 dqrr_size;	/* number of DQRR ring entries */
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	/*
	 * EQCR (enqueue command ring) state.
	 * NOTE(review): field meanings below inferred from names — confirm
	 * against qbman-portal.c.
	 */
	struct {
		u32 pi;		/* producer index */
		u32 pi_vb;	/* producer-index validity bit */
		u32 pi_ring_size;
		u32 pi_ci_mask;
		u32 ci;		/* cached consumer index */
		int available;	/* free EQCR slots */
		u32 pend;
		u32 no_pfdr;
	} eqcr;

	/* serializes access to the portal across users */
	spinlock_t access_spinlock;

	/* Interrupt coalescing */
	u32 irq_threshold;
	u32 irq_holdoff;
	int use_adaptive_rx_coalesce;
};
167c89105c9SRoy Pledge 
168b46fe745SYouri Querry /* Function pointers */
169b46fe745SYouri Querry extern
170b46fe745SYouri Querry int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
171b46fe745SYouri Querry 			     const struct qbman_eq_desc *d,
172b46fe745SYouri Querry 			     const struct dpaa2_fd *fd);
173b46fe745SYouri Querry extern
174b46fe745SYouri Querry int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
175b46fe745SYouri Querry 				      const struct qbman_eq_desc *d,
176b46fe745SYouri Querry 				      const struct dpaa2_fd *fd,
177b46fe745SYouri Querry 				      uint32_t *flags,
178b46fe745SYouri Querry 				      int num_frames);
179b46fe745SYouri Querry extern
180b46fe745SYouri Querry int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
181b46fe745SYouri Querry 					   const struct qbman_eq_desc *d,
182b46fe745SYouri Querry 					   const struct dpaa2_fd *fd,
183b46fe745SYouri Querry 					   int num_frames);
184b46fe745SYouri Querry extern
185b46fe745SYouri Querry int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
186b46fe745SYouri Querry extern
187b46fe745SYouri Querry const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
188b46fe745SYouri Querry extern
189b46fe745SYouri Querry int (*qbman_swp_release_ptr)(struct qbman_swp *s,
190b46fe745SYouri Querry 			     const struct qbman_release_desc *d,
191b46fe745SYouri Querry 			     const u64 *buffers,
192b46fe745SYouri Querry 			     unsigned int num_buffers);
193b46fe745SYouri Querry 
194b46fe745SYouri Querry /* Functions */
195c89105c9SRoy Pledge struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
196c89105c9SRoy Pledge void qbman_swp_finish(struct qbman_swp *p);
197c89105c9SRoy Pledge u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
198c89105c9SRoy Pledge void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
199c89105c9SRoy Pledge u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
200c89105c9SRoy Pledge void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
201c89105c9SRoy Pledge int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
202c89105c9SRoy Pledge void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
203c89105c9SRoy Pledge 
204c89105c9SRoy Pledge void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
205c89105c9SRoy Pledge void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
206c89105c9SRoy Pledge 
207c89105c9SRoy Pledge void qbman_pull_desc_clear(struct qbman_pull_desc *d);
208c89105c9SRoy Pledge void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
209c89105c9SRoy Pledge 				 struct dpaa2_dq *storage,
210c89105c9SRoy Pledge 				 dma_addr_t storage_phys,
211c89105c9SRoy Pledge 				 int stash);
212c89105c9SRoy Pledge void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
213c89105c9SRoy Pledge void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
214c89105c9SRoy Pledge void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
215c89105c9SRoy Pledge 			    enum qbman_pull_type_e dct);
216c89105c9SRoy Pledge void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
217c89105c9SRoy Pledge 				 enum qbman_pull_type_e dct);
218c89105c9SRoy Pledge 
219c89105c9SRoy Pledge void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
220c89105c9SRoy Pledge 
221c89105c9SRoy Pledge int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
222c89105c9SRoy Pledge 
223c89105c9SRoy Pledge void qbman_eq_desc_clear(struct qbman_eq_desc *d);
224c89105c9SRoy Pledge void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
225c89105c9SRoy Pledge void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
226c89105c9SRoy Pledge void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
227c89105c9SRoy Pledge void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
228c89105c9SRoy Pledge 			  u32 qd_bin, u32 qd_prio);
229c89105c9SRoy Pledge 
230c89105c9SRoy Pledge 
231c89105c9SRoy Pledge void qbman_release_desc_clear(struct qbman_release_desc *d);
232c89105c9SRoy Pledge void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
233c89105c9SRoy Pledge void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
234c89105c9SRoy Pledge 
235c89105c9SRoy Pledge int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
236c89105c9SRoy Pledge 		      unsigned int num_buffers);
237c89105c9SRoy Pledge int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
238c89105c9SRoy Pledge 			   u8 alt_fq_verb);
239c89105c9SRoy Pledge int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
240c89105c9SRoy Pledge 		       u8 we_mask, u8 cdan_en,
241c89105c9SRoy Pledge 		       u64 ctx);
242c89105c9SRoy Pledge 
243c89105c9SRoy Pledge void *qbman_swp_mc_start(struct qbman_swp *p);
244c89105c9SRoy Pledge void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
245c89105c9SRoy Pledge void *qbman_swp_mc_result(struct qbman_swp *p);
246c89105c9SRoy Pledge 
/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	/* Indirect call: the implementation is bound at runtime via
	 * qbman_swp_enqueue_ptr.
	 */
	return qbman_swp_enqueue_ptr(s, d, fd);
}
261b46fe745SYouri Querry 
262b46fe745SYouri Querry /**
263b46fe745SYouri Querry  * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
264b46fe745SYouri Querry  * using one enqueue descriptor
265b46fe745SYouri Querry  * @s:  the software portal used for enqueue
266b46fe745SYouri Querry  * @d:  the enqueue descriptor
267b46fe745SYouri Querry  * @fd: table pointer of frame descriptor table to be enqueued
268b46fe745SYouri Querry  * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
269b46fe745SYouri Querry  * @num_frames: number of fd to be enqueued
270b46fe745SYouri Querry  *
271b46fe745SYouri Querry  * Return the number of fd enqueued, or a negative error number.
272b46fe745SYouri Querry  */
273b46fe745SYouri Querry static inline int
qbman_swp_enqueue_multiple(struct qbman_swp * s,const struct qbman_eq_desc * d,const struct dpaa2_fd * fd,uint32_t * flags,int num_frames)2749d988097SYouri Querry qbman_swp_enqueue_multiple(struct qbman_swp *s,
2759d988097SYouri Querry 			   const struct qbman_eq_desc *d,
2769d988097SYouri Querry 			   const struct dpaa2_fd *fd,
2779d988097SYouri Querry 			   uint32_t *flags,
278b46fe745SYouri Querry 			   int num_frames)
279b46fe745SYouri Querry {
280b46fe745SYouri Querry 	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
281b46fe745SYouri Querry }
2829d988097SYouri Querry 
/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	/* Runtime-bound implementation hook. */
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}
3019d988097SYouri Querry 
302c89105c9SRoy Pledge /**
303c89105c9SRoy Pledge  * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
304c89105c9SRoy Pledge  * @dq: the dequeue result to be checked
305c89105c9SRoy Pledge  *
306c89105c9SRoy Pledge  * DQRR entries may contain non-dequeue results, ie. notifications
307c89105c9SRoy Pledge  */
qbman_result_is_DQ(const struct dpaa2_dq * dq)308c89105c9SRoy Pledge static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
309c89105c9SRoy Pledge {
310c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
311c89105c9SRoy Pledge }
312c89105c9SRoy Pledge 
/**
 * qbman_result_is_SCN() - Check the dequeue result is notification or not
 * @dq: the dequeue result to be checked
 *
 * Anything that is not a frame dequeue response is a state-change
 * notification of some kind.
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return qbman_result_is_DQ(dq) ? 0 : 1;
}
322c89105c9SRoy Pledge 
323c89105c9SRoy Pledge /* FQ Data Availability */
qbman_result_is_FQDAN(const struct dpaa2_dq * dq)324c89105c9SRoy Pledge static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
325c89105c9SRoy Pledge {
326c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
327c89105c9SRoy Pledge }
328c89105c9SRoy Pledge 
329c89105c9SRoy Pledge /* Channel Data Availability */
qbman_result_is_CDAN(const struct dpaa2_dq * dq)330c89105c9SRoy Pledge static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
331c89105c9SRoy Pledge {
332c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
333c89105c9SRoy Pledge }
334c89105c9SRoy Pledge 
335c89105c9SRoy Pledge /* Congestion State Change */
qbman_result_is_CSCN(const struct dpaa2_dq * dq)336c89105c9SRoy Pledge static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
337c89105c9SRoy Pledge {
338c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
339c89105c9SRoy Pledge }
340c89105c9SRoy Pledge 
341c89105c9SRoy Pledge /* Buffer Pool State Change */
qbman_result_is_BPSCN(const struct dpaa2_dq * dq)342c89105c9SRoy Pledge static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
343c89105c9SRoy Pledge {
344c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
345c89105c9SRoy Pledge }
346c89105c9SRoy Pledge 
347c89105c9SRoy Pledge /* Congestion Group Count Update */
qbman_result_is_CGCU(const struct dpaa2_dq * dq)348c89105c9SRoy Pledge static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
349c89105c9SRoy Pledge {
350c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
351c89105c9SRoy Pledge }
352c89105c9SRoy Pledge 
353c89105c9SRoy Pledge /* Retirement */
qbman_result_is_FQRN(const struct dpaa2_dq * dq)354c89105c9SRoy Pledge static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
355c89105c9SRoy Pledge {
356c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
357c89105c9SRoy Pledge }
358c89105c9SRoy Pledge 
359c89105c9SRoy Pledge /* Retirement Immediate */
qbman_result_is_FQRNI(const struct dpaa2_dq * dq)360c89105c9SRoy Pledge static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
361c89105c9SRoy Pledge {
362c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
363c89105c9SRoy Pledge }
364c89105c9SRoy Pledge 
365c89105c9SRoy Pledge  /* Park */
qbman_result_is_FQPN(const struct dpaa2_dq * dq)366c89105c9SRoy Pledge static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
367c89105c9SRoy Pledge {
368c89105c9SRoy Pledge 	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
369c89105c9SRoy Pledge }
370c89105c9SRoy Pledge 
371c89105c9SRoy Pledge /**
372c89105c9SRoy Pledge  * qbman_result_SCN_state() - Get the state field in State-change notification
373c89105c9SRoy Pledge  */
qbman_result_SCN_state(const struct dpaa2_dq * scn)374c89105c9SRoy Pledge static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
375c89105c9SRoy Pledge {
376c89105c9SRoy Pledge 	return scn->scn.state;
377c89105c9SRoy Pledge }
378c89105c9SRoy Pledge 
379c89105c9SRoy Pledge #define SCN_RID_MASK 0x00FFFFFF
380c89105c9SRoy Pledge 
381c89105c9SRoy Pledge /**
382c89105c9SRoy Pledge  * qbman_result_SCN_rid() - Get the resource id in State-change notification
383c89105c9SRoy Pledge  */
qbman_result_SCN_rid(const struct dpaa2_dq * scn)384c89105c9SRoy Pledge static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
385c89105c9SRoy Pledge {
386c89105c9SRoy Pledge 	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
387c89105c9SRoy Pledge }
388c89105c9SRoy Pledge 
389c89105c9SRoy Pledge /**
390c89105c9SRoy Pledge  * qbman_result_SCN_ctx() - Get the context data in State-change notification
391c89105c9SRoy Pledge  */
qbman_result_SCN_ctx(const struct dpaa2_dq * scn)392c89105c9SRoy Pledge static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
393c89105c9SRoy Pledge {
394c89105c9SRoy Pledge 	return le64_to_cpu(scn->scn.ctx);
395c89105c9SRoy Pledge }
396c89105c9SRoy Pledge 
397c89105c9SRoy Pledge /**
398c89105c9SRoy Pledge  * qbman_swp_fq_schedule() - Move the fq to the scheduled state
399c89105c9SRoy Pledge  * @s:    the software portal object
400c89105c9SRoy Pledge  * @fqid: the index of frame queue to be scheduled
401c89105c9SRoy Pledge  *
402c89105c9SRoy Pledge  * There are a couple of different ways that a FQ can end up parked state,
403c89105c9SRoy Pledge  * This schedules it.
404c89105c9SRoy Pledge  *
405c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
406c89105c9SRoy Pledge  */
qbman_swp_fq_schedule(struct qbman_swp * s,u32 fqid)407c89105c9SRoy Pledge static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
408c89105c9SRoy Pledge {
409c89105c9SRoy Pledge 	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
410c89105c9SRoy Pledge }
411c89105c9SRoy Pledge 
412c89105c9SRoy Pledge /**
413c89105c9SRoy Pledge  * qbman_swp_fq_force() - Force the FQ to fully scheduled state
414c89105c9SRoy Pledge  * @s:    the software portal object
415c89105c9SRoy Pledge  * @fqid: the index of frame queue to be forced
416c89105c9SRoy Pledge  *
417c89105c9SRoy Pledge  * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
418c89105c9SRoy Pledge  * and thus be available for selection by any channel-dequeuing behaviour (push
419c89105c9SRoy Pledge  * or pull). If the FQ is subsequently "dequeued" from the channel and is still
420c89105c9SRoy Pledge  * empty at the time this happens, the resulting dq_entry will have no FD.
421c89105c9SRoy Pledge  * (qbman_result_DQ_fd() will return NULL.)
422c89105c9SRoy Pledge  *
423c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
424c89105c9SRoy Pledge  */
qbman_swp_fq_force(struct qbman_swp * s,u32 fqid)425c89105c9SRoy Pledge static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
426c89105c9SRoy Pledge {
427c89105c9SRoy Pledge 	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
428c89105c9SRoy Pledge }
429c89105c9SRoy Pledge 
430c89105c9SRoy Pledge /**
431c89105c9SRoy Pledge  * qbman_swp_fq_xon() - sets FQ flow-control to XON
432c89105c9SRoy Pledge  * @s:    the software portal object
433c89105c9SRoy Pledge  * @fqid: the index of frame queue
434c89105c9SRoy Pledge  *
435c89105c9SRoy Pledge  * This setting doesn't affect enqueues to the FQ, just dequeues.
436c89105c9SRoy Pledge  *
437c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
438c89105c9SRoy Pledge  */
qbman_swp_fq_xon(struct qbman_swp * s,u32 fqid)439c89105c9SRoy Pledge static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
440c89105c9SRoy Pledge {
441c89105c9SRoy Pledge 	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
442c89105c9SRoy Pledge }
443c89105c9SRoy Pledge 
444c89105c9SRoy Pledge /**
445c89105c9SRoy Pledge  * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
446c89105c9SRoy Pledge  * @s:    the software portal object
447c89105c9SRoy Pledge  * @fqid: the index of frame queue
448c89105c9SRoy Pledge  *
449c89105c9SRoy Pledge  * This setting doesn't affect enqueues to the FQ, just dequeues.
450c89105c9SRoy Pledge  * XOFF FQs will remain in the tenatively-scheduled state, even when
451c89105c9SRoy Pledge  * non-empty, meaning they won't be selected for scheduled dequeuing.
452c89105c9SRoy Pledge  * If a FQ is changed to XOFF after it had already become truly-scheduled
453c89105c9SRoy Pledge  * to a channel, and a pull dequeue of that channel occurs that selects
454c89105c9SRoy Pledge  * that FQ for dequeuing, then the resulting dq_entry will have no FD.
455c89105c9SRoy Pledge  * (qbman_result_DQ_fd() will return NULL.)
456c89105c9SRoy Pledge  *
457c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
458c89105c9SRoy Pledge  */
qbman_swp_fq_xoff(struct qbman_swp * s,u32 fqid)459c89105c9SRoy Pledge static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
460c89105c9SRoy Pledge {
461c89105c9SRoy Pledge 	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
462c89105c9SRoy Pledge }
463c89105c9SRoy Pledge 
464c89105c9SRoy Pledge /* If the user has been allocated a channel object that is going to generate
465c89105c9SRoy Pledge  * CDANs to another channel, then the qbman_swp_CDAN* functions will be
466c89105c9SRoy Pledge  * necessary.
467c89105c9SRoy Pledge  *
468c89105c9SRoy Pledge  * CDAN-enabled channels only generate a single CDAN notification, after which
469c89105c9SRoy Pledge  * they need to be reenabled before they'll generate another. The idea is
470c89105c9SRoy Pledge  * that pull dequeuing will occur in reaction to the CDAN, followed by a
471c89105c9SRoy Pledge  * reenable step. Each function generates a distinct command to hardware, so a
472c89105c9SRoy Pledge  * combination function is provided if the user wishes to modify the "context"
473c89105c9SRoy Pledge  * (which shows up in each CDAN message) each time they reenable, as a single
474c89105c9SRoy Pledge  * command to hardware.
475c89105c9SRoy Pledge  */
476c89105c9SRoy Pledge 
477c89105c9SRoy Pledge /**
478c89105c9SRoy Pledge  * qbman_swp_CDAN_set_context() - Set CDAN context
479c89105c9SRoy Pledge  * @s:         the software portal object
480c89105c9SRoy Pledge  * @channelid: the channel index
481c89105c9SRoy Pledge  * @ctx:       the context to be set in CDAN
482c89105c9SRoy Pledge  *
483c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
484c89105c9SRoy Pledge  */
qbman_swp_CDAN_set_context(struct qbman_swp * s,u16 channelid,u64 ctx)485c89105c9SRoy Pledge static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
486c89105c9SRoy Pledge 					     u64 ctx)
487c89105c9SRoy Pledge {
488c89105c9SRoy Pledge 	return qbman_swp_CDAN_set(s, channelid,
489c89105c9SRoy Pledge 				  CODE_CDAN_WE_CTX,
490c89105c9SRoy Pledge 				  0, ctx);
491c89105c9SRoy Pledge }
492c89105c9SRoy Pledge 
493c89105c9SRoy Pledge /**
494c89105c9SRoy Pledge  * qbman_swp_CDAN_enable() - Enable CDAN for the channel
495c89105c9SRoy Pledge  * @s:         the software portal object
496c89105c9SRoy Pledge  * @channelid: the index of the channel to generate CDAN
497c89105c9SRoy Pledge  *
498c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
499c89105c9SRoy Pledge  */
qbman_swp_CDAN_enable(struct qbman_swp * s,u16 channelid)500c89105c9SRoy Pledge static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
501c89105c9SRoy Pledge {
502c89105c9SRoy Pledge 	return qbman_swp_CDAN_set(s, channelid,
503c89105c9SRoy Pledge 				  CODE_CDAN_WE_EN,
504c89105c9SRoy Pledge 				  1, 0);
505c89105c9SRoy Pledge }
506c89105c9SRoy Pledge 
507c89105c9SRoy Pledge /**
508c89105c9SRoy Pledge  * qbman_swp_CDAN_disable() - disable CDAN for the channel
509c89105c9SRoy Pledge  * @s:         the software portal object
510c89105c9SRoy Pledge  * @channelid: the index of the channel to generate CDAN
511c89105c9SRoy Pledge  *
512c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
513c89105c9SRoy Pledge  */
qbman_swp_CDAN_disable(struct qbman_swp * s,u16 channelid)514c89105c9SRoy Pledge static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
515c89105c9SRoy Pledge {
516c89105c9SRoy Pledge 	return qbman_swp_CDAN_set(s, channelid,
517c89105c9SRoy Pledge 				  CODE_CDAN_WE_EN,
518c89105c9SRoy Pledge 				  0, 0);
519c89105c9SRoy Pledge }
520c89105c9SRoy Pledge 
521c89105c9SRoy Pledge /**
522c89105c9SRoy Pledge  * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
523c89105c9SRoy Pledge  * @s:         the software portal object
524c89105c9SRoy Pledge  * @channelid: the index of the channel to generate CDAN
525c89105c9SRoy Pledge  * @ctx:i      the context set in CDAN
526c89105c9SRoy Pledge  *
527c89105c9SRoy Pledge  * Return 0 for success, or negative error code for failure.
528c89105c9SRoy Pledge  */
qbman_swp_CDAN_set_context_enable(struct qbman_swp * s,u16 channelid,u64 ctx)529c89105c9SRoy Pledge static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
530c89105c9SRoy Pledge 						    u16 channelid,
531c89105c9SRoy Pledge 						    u64 ctx)
532c89105c9SRoy Pledge {
533c89105c9SRoy Pledge 	return qbman_swp_CDAN_set(s, channelid,
534c89105c9SRoy Pledge 				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
535c89105c9SRoy Pledge 				  1, ctx);
536c89105c9SRoy Pledge }
537c89105c9SRoy Pledge 
538c89105c9SRoy Pledge /* Wraps up submit + poll-for-result */
qbman_swp_mc_complete(struct qbman_swp * swp,void * cmd,u8 cmd_verb)539c89105c9SRoy Pledge static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
540c89105c9SRoy Pledge 					  u8 cmd_verb)
541c89105c9SRoy Pledge {
542d31beda2SYouri Querry 	int loopvar = 2000;
543c89105c9SRoy Pledge 
544c89105c9SRoy Pledge 	qbman_swp_mc_submit(swp, cmd, cmd_verb);
545c89105c9SRoy Pledge 
546c89105c9SRoy Pledge 	do {
547c89105c9SRoy Pledge 		cmd = qbman_swp_mc_result(swp);
548c89105c9SRoy Pledge 	} while (!cmd && loopvar--);
549c89105c9SRoy Pledge 
550c89105c9SRoy Pledge 	WARN_ON(!loopvar);
551c89105c9SRoy Pledge 
552c89105c9SRoy Pledge 	return cmd;
553c89105c9SRoy Pledge }
554c89105c9SRoy Pledge 
/* Query APIs */

/*
 * Response image of the FQ query-non-programmable command; filled in by
 * qbman_fq_query_state() and read via the qbman_fq_state_*() accessors.
 * Little-endian hardware layout — do not reorder fields.
 */
struct qbman_fq_query_np_rslt {
	u8 verb;	/* echoed command verb */
	u8 rslt;	/* result code; QBMAN_MC_RSLT_OK on success */
	u8 st1;		/* state field 1 */
	u8 st2;		/* state field 2 */
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;	/* frames currently on the FQ */
	__le32 byte_cnt; /* bytes currently on the FQ */
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};
575e80081c3SRoy Pledge 
576e80081c3SRoy Pledge int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
577e80081c3SRoy Pledge 			 struct qbman_fq_query_np_rslt *r);
578e80081c3SRoy Pledge u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
579e80081c3SRoy Pledge u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
580e80081c3SRoy Pledge 
/*
 * Response image of the buffer-pool query command; filled in by
 * qbman_bp_query() and read via qbman_bp_info_num_free_bufs().
 * Little-endian hardware layout — do not reorder fields.
 */
struct qbman_bp_query_rslt {
	u8 verb;	/* echoed command verb */
	u8 rslt;	/* result code; QBMAN_MC_RSLT_OK on success */
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;	/* current pool fill level (free buffers) */
	__le32 hdotr;
	__le16 swdet;	/* SW depletion entry threshold */
	__le16 swdxt;	/* SW depletion exit threshold */
	__le16 hwdet;	/* HW depletion entry threshold */
	__le16 hwdxt;	/* HW depletion exit threshold */
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr; /* BPSCN write target address */
	__le64 bpscn_ctx;  /* context returned in BPSCN messages */
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};
607e80081c3SRoy Pledge 
/*
 * qbman_bp_query() - query buffer pool @bpid via portal @s, filling @r with
 * the raw firmware response. Return convention defined at the implementation —
 * presumably 0 on success, negative errno on failure; confirm against the
 * definition in qbman-portal.c.
 */
int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

/*
 * Extract the number of free buffers from a previously obtained query
 * response. NOTE(review): unlike the FQ accessors above, @a is not
 * const-qualified — aligning it would require touching the out-of-tree
 * definition as well, so only flagged here.
 */
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
612e80081c3SRoy Pledge 
613b46fe745SYouri Querry /**
614b46fe745SYouri Querry  * qbman_swp_release() - Issue a buffer release command
615b46fe745SYouri Querry  * @s:           the software portal object
616b46fe745SYouri Querry  * @d:           the release descriptor
617b46fe745SYouri Querry  * @buffers:     a pointer pointing to the buffer address to be released
618b46fe745SYouri Querry  * @num_buffers: number of buffers to be released,  must be less than 8
619b46fe745SYouri Querry  *
620b46fe745SYouri Querry  * Return 0 for success, -EBUSY if the release command ring is not ready.
621b46fe745SYouri Querry  */
qbman_swp_release(struct qbman_swp * s,const struct qbman_release_desc * d,const u64 * buffers,unsigned int num_buffers)622b46fe745SYouri Querry static inline int qbman_swp_release(struct qbman_swp *s,
623b46fe745SYouri Querry 				    const struct qbman_release_desc *d,
624b46fe745SYouri Querry 				    const u64 *buffers,
625b46fe745SYouri Querry 				    unsigned int num_buffers)
626b46fe745SYouri Querry {
627b46fe745SYouri Querry 	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
628b46fe745SYouri Querry }
629b46fe745SYouri Querry 
/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	int ret;

	/* Forward to the implementation selected via the _ptr hook. */
	ret = qbman_swp_pull_ptr(s, d);

	return ret;
}
644b46fe745SYouri Querry 
/**
 * qbman_swp_dqrr_next() - Get an valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	/* Forward to the implementation selected via the _ptr hook. */
	dq = qbman_swp_dqrr_next_ptr(s);

	return dq;
}
657b46fe745SYouri Querry 
/*
 * qbman_swp_set_irq_coalescing() - configure interrupt coalescing for portal
 * @p: @irq_threshold entries before asserting DQRI, @irq_holdoff hold-off
 * period (units defined by the implementation — presumably nanoseconds,
 * converted via qman_256_cycles_per_ns in the portal descriptor; confirm
 * against the definition in qbman-portal.c). Returns int — presumably 0 on
 * success, negative errno on invalid parameters.
 */
int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
				 u32 irq_holdoff);

/*
 * qbman_swp_get_irq_coalescing() - read back the currently configured
 * interrupt coalescing parameters of portal @p into @irq_threshold and
 * @irq_holdoff.
 */
void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
				  u32 *irq_holdoff);
663ed1d2143SIoana Ciornei 
664c89105c9SRoy Pledge #endif /* __FSL_QBMAN_PORTAL_H */
665