// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQCR_CI      0x840
#define QBMAN_CINH_SWP_EQAR         0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI         0xa00
#define QBMAN_CINH_SWP_DQRR_ITR     0xa80
#define QBMAN_CINH_SWP_DCAP         0xac0
#define QBMAN_CINH_SWP_SDQCR        0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR          0xcc0
#define QBMAN_CINH_SWP_ISR          0xe00
#define QBMAN_CINH_SWP_IER          0xe40
#define QBMAN_CINH_SWP_ISDR         0xe80
#define QBMAN_CINH_SWP_IIR          0xec0
#define QBMAN_CINH_SWP_ITPR         0xf40

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK          0x0f
#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal function declarations */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					 const struct qbman_eq_desc *d,
					 const struct dpaa2_fd *fd,
					 int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
			= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
			= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
			= qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE	   0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}
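
/*
 * Worked example: the pi/ci indices kept in struct qbman_swp run modulo
 * 2 * ringsize (the extra bit is the valid-bit phase on top of the ring
 * offset), so with ringsize 8, first = 14 and last = 2 the producer has
 * wrapped and the distance is (2 * 8) - (14 - 2) = 4 entries.
 */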

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return a qbman_swp portal on success, or NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}
	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
			& p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	/* Initialize the software portal with an IRQ timeout period of 0 us */
	qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);

	return p;
}
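
/*
 * Illustrative sketch (kept out of the build): the expected lifetime of a
 * portal object.  The qbman_swp_desc is normally filled in by the dpio
 * driver from the MC firmware's description of the portal.
 */
#if 0
static void example_portal_lifetime(const struct qbman_swp_desc *d)
{
	struct qbman_swp *swp = qbman_swp_init(d);

	if (!swp)
		return;		/* portal not enabled, or out of memory */

	/* ... enqueue, dequeue and release commands go here ... */

	qbman_swp_finish(swp);	/* frees the object; the BARs stay mapped */
}
#endif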

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}

/**
 * qbman_swp_interrupt_read_status()
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status()
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit the IRQs
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
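
/*
 * Illustrative sketch (kept out of the build): the read/clear pattern the
 * interrupt accessors above are meant for.  How the status bits are
 * dispatched is up to the caller; the handler shape here is an assumption
 * for the example.
 */
#if 0
static void example_portal_isr(struct qbman_swp *swp)
{
	u32 status = qbman_swp_interrupt_read_status(swp);

	if (!status)
		return;		/* no source asserted, not our interrupt */

	/* ... dispatch work based on the asserted status bits ... */

	/* acknowledge the sources that have been handled */
	qbman_swp_interrupt_clear_status(swp, status);
}
#endif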

/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}

/*
 * Checks for a completed response (returns non-NULL if and only if the
 * response is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero.
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
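
/*
 * Illustrative sketch (kept out of the build): how the three primitives
 * above compose into a complete management command.  The driver normally
 * goes through the qbman_swp_mc_complete() helper (used further down in
 * this file), which wraps the same submit-then-poll ordering.
 */
#if 0
static void *example_mc_command(struct qbman_swp *swp, u8 verb)
{
	void *cmd = qbman_swp_mc_start(swp);
	void *rslt;

	if (!cmd)
		return NULL;

	/* ... fill in the command body, leaving the verb byte alone ... */

	qbman_swp_mc_submit(swp, cmd, verb);
	do {
		rslt = qbman_swp_mc_result(swp);
	} while (!rslt);	/* a real caller would bound this loop */

	return rslt;
}
#endif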

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:                the enqueue descriptor.
 * @respond_success:  1 = enqueue with response always; 0 = enqueue with
 *                    rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 *  - enqueue to a frame queue
 *  - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}
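
/*
 * Illustrative sketch (kept out of the build): composing an enqueue
 * descriptor from the setters above.  qbman_swp_enqueue() is assumed to be
 * the qbman-portal.h wrapper around qbman_swp_enqueue_ptr, i.e. it resolves
 * to the direct or memory-backed implementation picked in qbman_swp_init().
 */
#if 0
static int example_enqueue_to_fq(struct qbman_swp *swp, u32 fqid,
				 const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc d;

	qbman_eq_desc_clear(&d);
	qbman_eq_desc_set_no_orp(&d, 0);	/* rejections go back to a FQ */
	qbman_eq_desc_set_fq(&d, fqid);		/* target is a frame queue */

	return qbman_swp_enqueue(swp, &d, fd);	/* 0, or -EBUSY if EQCR full */
}
#endif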

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi-frame enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = s->eqcr.pi_ci_mask >> 1;
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				 (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the software-tracked producer index */
	s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi-frame enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&s->access_spinlock, irq_flags);

	half_mask = s->eqcr.pi_ci_mask >> 1;
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				 (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     QB_RT_BIT | s->eqcr.pi | s->eqcr.pi_vb);
	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi-frame enqueue
 * command using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = s->eqcr.pi_ci_mask >> 1;
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI)
				& full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the software-tracked producer index */
	s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi-frame enqueue
 * command using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = s->eqcr.pi_ci_mask >> 1;
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     QB_RT_BIT | s->eqcr.pi | s->eqcr.pi_vb);

	return num_enqueued;
}
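
/*
 * Illustrative sketch (kept out of the build): a burst enqueue that retries
 * until every frame is accepted.  qbman_swp_enqueue_multiple() is assumed to
 * be the qbman-portal.h wrapper around qbman_swp_enqueue_multiple_ptr; a
 * return of 0 just means the EQCR was full on this pass, not an error.
 */
#if 0
static int example_enqueue_burst(struct qbman_swp *swp,
				 const struct qbman_eq_desc *d,
				 const struct dpaa2_fd *fds, int n)
{
	int done = 0;

	while (done < n) {
		int ret = qbman_swp_enqueue_multiple(swp, d, fds + done,
						     NULL, n - done);
		if (ret < 0)
			return ret;
		done += ret;
	}
	return done;
}
#endif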

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = !!(src & (1 << channel_idx));
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map.  If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
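
/*
 * Illustrative sketch (kept out of the build): enabling static dequeue on a
 * single channel and reading the setting back.  Channel index 0 is an
 * arbitrary choice for the example.
 */
#if 0
static void example_enable_push_dequeue(struct qbman_swp *swp)
{
	int enabled;

	qbman_swp_push_set(swp, 0, 1);		/* updates and writes SDQCR */
	qbman_swp_push_get(swp, 0, &enabled);
	WARN_ON(!enabled);
}
#endif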

#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      pointer to the memory where dequeue results are written
 * @storage_phys: the physical address of the storage memory
 * @stash:        whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
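
/*
 * Illustrative sketch (kept out of the build): a volatile dequeue of up to
 * 16 frames from one FQ into caller-provided storage.  qbman_swp_pull() is
 * assumed to be the qbman-portal.h wrapper around qbman_swp_pull_ptr, and
 * 'storage'/'storage_phys' would come from a DMA-mapped allocation owned by
 * the caller.
 */
#if 0
static int example_pull_from_fq(struct qbman_swp *swp, u32 fqid,
				struct dpaa2_dq *storage,
				dma_addr_t storage_phys)
{
	struct qbman_pull_desc d;

	qbman_pull_desc_clear(&d);
	qbman_pull_desc_set_storage(&d, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&d, 16);
	qbman_pull_desc_set_fq(&d, fqid);

	return qbman_swp_pull(swp, &d);	/* 0, or -EBUSY if a pull is in flight */
}
#endif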

/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}

#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_consume() -  Consume DQRR entries previously returned from
 *                             qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
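
/*
 * Illustrative sketch (kept out of the build): draining the DQRR.
 * qbman_swp_dqrr_next() is assumed to be the qbman-portal.h wrapper around
 * qbman_swp_dqrr_next_ptr; every entry it hands out must eventually be
 * returned through qbman_swp_dqrr_consume().
 */
#if 0
static void example_drain_dqrr(struct qbman_swp *swp)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		/* ... process the frame(s) described by 'dq' ... */
		qbman_swp_dqrr_consume(swp, dq);
	}
}
#endif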

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in the pull dequeue
 *                                 command
 * @s: the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect the change back to 1
	 * the next time this loop is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}
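
/*
 * Illustrative sketch (kept out of the build): busy-polling the first
 * response slot of the storage written by a previously issued pull command.
 * The retry bound is an arbitrary choice for the example; real callers
 * usually poll from NAPI context or apply a timeout instead.
 */
#if 0
static const struct dpaa2_dq *example_wait_pull_result(struct qbman_swp *swp,
						       struct dpaa2_dq *storage)
{
	int retries = 1000;

	while (!qbman_result_has_new_result(swp, storage)) {
		if (!--retries)
			return NULL;
		cpu_relax();
	}
	return storage;
}
#endif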

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor to be set
 * @bpid: the bpid value to be set
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d:      the release descriptor to be set
 * @enable: enable (1) or disable (0) value
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     pointer to the array of buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EINVAL on an invalid buffer count, or -EBUSY if the
 * release command ring is not ready.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}
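
/*
 * Illustrative sketch (kept out of the build): returning a small batch of
 * buffers to a pool.  qbman_swp_release() is assumed to be the qbman-portal.h
 * wrapper around qbman_swp_release_ptr; only the BPID needs to be set in the
 * descriptor for a plain release.
 */
#if 0
static int example_release_buffers(struct qbman_swp *swp, u16 bpid,
				   const u64 *addrs, unsigned int n)
{
	struct qbman_release_desc d;

	qbman_release_desc_clear(&d);
	qbman_release_desc_set_bpid(&d, bpid);

	return qbman_swp_release(swp, &d, addrs, n);	/* n must be 1..7 */
}
#endif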

/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     pointer to the array of buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EINVAL on an invalid buffer count, or -EBUSY if the
 * release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}

struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     pointer to the array where acquired buffer addresses are
 *               written
 * @num_buffers: number of buffers to be acquired, must be between 1 and 7,
 *               inclusive
 *
 * Return the number of buffers acquired (which may be fewer than requested),
 * or a negative error code if the acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
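
/*
 * Illustrative sketch (kept out of the build): acquiring buffers and
 * tolerating a short read.  qbman_swp_acquire() may legitimately hand back
 * fewer buffers than requested when the pool is nearly empty.
 */
#if 0
static int example_acquire_buffers(struct qbman_swp *swp, u16 bpid, u64 *addrs)
{
	int got = qbman_swp_acquire(swp, bpid, addrs, 7);

	if (got < 0)
		return got;	/* command failed outright */

	/* ... use addrs[0] .. addrs[got - 1], then release them back ... */
	return got;
}
#endif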

struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}

struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}

#define QBMAN_RESPONSE_VERB_MASK	0x7f
#define QBMAN_FQ_QUERY_NP		0x45
#define QBMAN_BP_QUERY			0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("qbman: Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}

struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("qbman: Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}

/**
 * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
 * @p: the software portal object
 * @irq_threshold: interrupt threshold
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 *
 * Return 0 for success, or negative error code on error.
 */
int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
				 u32 irq_holdoff)
{
	u32 itp, max_holdoff;

	/*
	 * Convert irq_holdoff from microseconds to 256-QBMan-clock-cycle
	 * (ITP) units.  The conversion factor depends on the QBMan internal
	 * frequency.
	 */
	itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
	if (itp > 4096) {
		max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
		pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
		return -EINVAL;
	}

	if (irq_threshold >= p->dqrr.dqrr_size) {
		pr_err("irq_threshold must be <= %u\n", p->dqrr.dqrr_size - 1);
		return -EINVAL;
	}

	p->irq_threshold = irq_threshold;
	p->irq_holdoff = irq_holdoff;

	qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
	qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);

	return 0;
}
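
/*
 * Worked example (the clock figure is made up for the illustration): on a
 * part where 256 QBMan cycles take 512 ns, i.e. qman_256_cycles_per_ns is
 * 512, a requested holdoff of 100 us converts to
 * itp = (100 * 1000) / 512 = 195 ITP units, well under the 4096-unit limit
 * enforced above.
 */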

/**
 * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
 * @p: the software portal object
 * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
 * DQRR entries in the portal than the threshold)
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 */
void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
				  u32 *irq_holdoff)
{
	if (irq_threshold)
		*irq_threshold = p->irq_threshold;
	if (irq_holdoff)
		*irq_holdoff = p->irq_holdoff;
}