/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version;
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* structure of the pull dequeue descriptor */
struct qbman_pull_desc {
	u8 verb;
	u8 numf;
	u8 tok;
	u8 reserved;
	__le32 dq_src;
	__le64 rsp_addr;
	u64 rsp_addr_virt;
	u8 padding[40];
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
	u8 verb;
	u8 dca;
	__le16 seqnum;
	__le16 orpid;
	__le16 reserved1;
	__le32 tgtid;
	__le32 tag;
	__le16 qdbin;
	u8 qpri;
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
	u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	__le32 reserved2;
	__le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void *addr_cena;
	void __iomem *addr_cinh;

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues */
	u32 sdq;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;
		u32 valid_bit;
		u8 dqrr_size;
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;
};

struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
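
/*
 * A minimal interrupt-servicing sketch (illustrative only; "swp" stands
 * for a portal returned by qbman_swp_init() and the DQRR processing is
 * elided):
 *
 *	u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *	if (status & QBMAN_SWP_INTERRUPT_DQRI)
 *		... process entries via qbman_swp_dqrr_next() ...
 *	qbman_swp_interrupt_clear_status(swp, status);
 */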

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
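
/*
 * Push-dequeue sketch (illustrative; "swp" is an initialised portal):
 * enable push-mode dequeues on channel index 0, then read the setting
 * back.
 *
 *	int enabled;
 *
 *	qbman_swp_push_set(swp, 0, 1);
 *	qbman_swp_push_get(swp, 0, &enabled);
 */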

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);

int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
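
/*
 * Volatile (pull) dequeue sketch (illustrative; "storage"/"storage_phys"
 * are assumed to come from a DMA-coherent allocation and "my_fqid" is a
 * placeholder). A non-zero return from qbman_swp_pull() indicates the
 * portal was busy and the pull should be retried.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 8);
 *	qbman_pull_desc_set_fq(&pd, my_fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		... busy, retry later ...
 */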

const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
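
/*
 * DQRR polling sketch (illustrative): qbman_swp_dqrr_next() returns NULL
 * once the ring is empty, and every entry it hands back must be consumed.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		if (qbman_result_is_DQ(dq))
 *			... handle the frame ...
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */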

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
		      const struct dpaa2_fd *fd);
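
/*
 * Enqueue sketch (illustrative; "fd" is a previously built frame
 * descriptor and "my_fqid" a placeholder): no order restoration, no
 * enqueue response, target a FQ directly. A non-zero return means the
 * EQCR was full and the enqueue should be retried.
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, my_fqid);
 *	if (qbman_swp_enqueue(swp, &ed, fd))
 *		... full, retry later ...
 */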

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
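
/*
 * Buffer pool sketch (illustrative; "my_bpid" and the addresses are
 * placeholders): release two buffers to a pool, then acquire buffers
 * back. qbman_swp_acquire() returns the number of buffers acquired, or
 * a negative error.
 *
 *	struct qbman_release_desc rd;
 *	u64 bufs[2] = { addr0, addr1 };
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, my_bpid);
 *	qbman_swp_release(swp, &rd, bufs, 2);
 *
 *	int n = qbman_swp_acquire(swp, my_bpid, bufs, 2);
 */
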
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - check if the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}
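
/*
 * The qbman_result_is_*() helpers above classify an entry by its verb
 * field; a typical dispatch looks like (illustrative sketch):
 *
 *	if (qbman_result_is_DQ(dq))
 *		... a frame dequeue response ...
 *	else if (qbman_result_is_CDAN(dq))
 *		... channel data available: pull-dequeue, then re-arm ...
 *	else if (qbman_result_is_CSCN(dq))
 *		... congestion state change: parse with the helpers below ...
 */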

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 * @scn: the state-change notification to be parsed
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 * @scn: the state-change notification to be parsed
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 * @scn: the state-change notification to be parsed
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this moves it to the scheduled state.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
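
/*
 * Flow-control sketch (illustrative; "my_fqid" is a placeholder): pause
 * scheduled dequeues from a FQ, then resume them. Enqueues continue to
 * work throughout.
 *
 *	qbman_swp_fq_xoff(s, my_fqid);
 *	... drain or reconfigure ...
 *	qbman_swp_fq_xon(s, my_fqid);
 */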

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be re-enabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * re-enable step. Each function generates a distinct command to hardware, so
 * a combination function is provided if the user wishes to modify the
 * "context" (which shows up in each CDAN message) each time they re-enable,
 * as a single command to hardware.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
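
/*
 * Putting the CDAN helpers together (illustrative sketch; "my_chid" and
 * "my_ctx" are placeholders): arm the channel once with a context, and
 * after each notification pull-dequeue and then re-arm.
 *
 *	qbman_swp_CDAN_set_context_enable(s, my_chid, my_ctx);
 *	... on each CDAN: issue a channel pull via qbman_swp_pull() ...
 *	qbman_swp_CDAN_enable(s, my_chid);
 */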

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	/* warn if no result arrived within the poll budget */
	WARN_ON(!cmd);

	return cmd;
}

/* Query APIs */
struct qbman_fq_query_np_rslt {
	u8 verb;
	u8 rslt;
	u8 st1;
	u8 st2;
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;
	__le32 byte_cnt;
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
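
/*
 * FQ query sketch (illustrative; "my_fqid" is a placeholder and the
 * error path is elided): read the frame and byte counts of a FQ.
 *
 *	struct qbman_fq_query_np_rslt r;
 *
 *	if (!qbman_fq_query_state(s, my_fqid, &r))
 *		pr_info("fq %u: %u frames, %u bytes\n", my_fqid,
 *			qbman_fq_state_frame_count(&r),
 *			qbman_fq_state_byte_count(&r));
 */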

struct qbman_bp_query_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;
	__le32 hdotr;
	__le16 swdet;
	__le16 swdxt;
	__le16 hwdet;
	__le16 hwdxt;
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr;
	__le64 bpscn_ctx;
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
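
/*
 * Buffer pool query sketch (illustrative; "my_bpid" is a placeholder and
 * the error path is elided): read the number of free buffers in a pool.
 *
 *	struct qbman_bp_query_rslt r;
 *
 *	if (!qbman_bp_query(s, my_bpid, &r))
 *		pr_info("bpid %u: %u free buffers\n", my_bpid,
 *			qbman_bp_info_num_free_bufs(&r));
 */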

#endif /* __FSL_QBMAN_PORTAL_H */