// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_MASK   0xffff0000

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;

	if (!p)
		return NULL;
	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
				1, /* Writes Non-cacheable */
				0, /* EQCR_CI stashing threshold */
				3, /* RPM: Valid bit mode, RCR in array mode */
				2, /* DCM: Discrete consumption ack mode */
				3, /* EPM: Valid bit mode, EQCR in array mode */
				0, /* mem stashing drop enable == FALSE */
				1, /* mem stashing priority == TRUE */
				0, /* mem stashing enable == FALSE */
				1, /* dequeue stashing priority == TRUE */
				0, /* dequeue stashing enable == FALSE */
				0); /* EQCR_CI stashing priority == FALSE */

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
	return p;
}
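
/*
 * Illustrative only: a minimal sketch of portal bring-up, assuming the caller
 * has already mapped the portal's cache-enabled and cache-inhibited regions.
 * The descriptor field names match those used above (cena_bar, cinh_bar,
 * qman_version); 'cena_region' and 'cinh_region' are hypothetical mappings.
 *
 *	struct qbman_swp_desc pd;
 *	struct qbman_swp *swp;
 *
 *	pd.cena_bar = cena_region;
 *	pd.cinh_bar = cinh_region;
 *	pd.qman_version = QMAN_REV_4100;
 *	swp = qbman_swp_init(&pd);
 *	if (!swp)
 *		return -ENODEV;
 */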

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}

/**
 * qbman_swp_interrupt_read_status() - Read the interrupt status register
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - Clear bits in the interrupt status
 *                                      register
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit the IRQs
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	dma_wmb();
	*v = cmd_verb | p->mc.valid_bit;
}

/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));

	/* Remove the valid-bit - command completed if the rest is non-zero */
	verb = ret[0] & ~QB_VALID_BIT;
	if (!verb)
		return NULL;
	p->mc.valid_bit ^= QB_VALID_BIT;
	return ret;
}
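
/*
 * Illustrative only: the caller-side pattern for this management command
 * layer. Most callers in this file go through qbman_swp_mc_complete() (from
 * qbman-portal.h), which is assumed here to wrap exactly this
 * start/submit/poll sequence.
 *
 *	void *cmd, *resp;
 *
 *	cmd = qbman_swp_mc_start(p);
 *	if (!cmd)
 *		return -EBUSY;
 *	// fill in command fields, leaving the verb byte for submit
 *	qbman_swp_mc_submit(p, cmd, QBMAN_MC_ACQUIRE);
 *	do {
 *		resp = qbman_swp_mc_result(p);
 *	} while (!resp);
 */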

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the given enqueue descriptor
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 *   -enqueue to a frame queue
 *   -enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc *p;
	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);

	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p->dca, &d->dca, 31);
	memcpy(&p->fd, fd, sizeof(*fd));

	/* Set the verb byte, have to substitute in the valid-bit */
	dma_wmb();
	p->verb = d->verb | EQAR_VB(eqar);

	return 0;
}
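
/*
 * Illustrative only: a typical transmit path retries the enqueue a bounded
 * number of times while the EQCR is busy. 'swp', 'fqid' and 'fd' are assumed
 * to come from the caller; the retry bound is an arbitrary example value.
 *
 *	struct qbman_eq_desc ed;
 *	int i, err = -EBUSY;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	for (i = 0; i < 256 && err == -EBUSY; i++)
 *		err = qbman_swp_enqueue(swp, &ed, fd);
 */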

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = src & (1 << channel_idx);
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
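
/*
 * Illustrative only: a DPIO-style consumer enables push dequeue for its
 * channel and later services the resulting DQRR entries (see
 * qbman_swp_dqrr_next() below). Disabling again before teardown keeps SDQCR
 * at 0 while no channels are dequeued from, as required above.
 *
 *	qbman_swp_push_set(swp, channel_idx, 1);
 *	...
 *	qbman_swp_push_set(swp, channel_idx, 0);
 */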

#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}
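
/*
 * Illustrative only: a volatile (pull) dequeue into caller-provided storage,
 * assuming 'storage' is a DMA-coherent allocation and 'storage_phys' is its
 * bus address. Completion is detected with qbman_result_has_new_result()
 * further below.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -EBUSY;
 */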

#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
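
/*
 * Illustrative only: the usual DQRR service loop built on this call and
 * qbman_swp_dqrr_consume() below. dpaa2_dq_fd() is the frame accessor from
 * soc/fsl/dpaa2-global.h; process_fd() stands in for whatever the caller
 * does with each frame.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		process_fd(dpaa2_dq_fd(dq));
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */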

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s: the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}
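
/*
 * Illustrative only: completing the pull-dequeue flow sketched earlier.
 * Hardware writes the result into 'storage' asynchronously, so the caller
 * polls (or sleeps) until the token marks a valid response.
 *
 *	while (!qbman_result_has_new_result(swp, storage))
 *		cpu_relax();
 *	// 'storage' now holds a host-endian dequeue result
 */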

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the given release descriptor
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the given release descriptor
 * @bpid: the buffer pool ID
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d:      the given release descriptor
 * @enable: enable (1) or disable (0) the RCDI interrupt
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit and the number
	 * of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}
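
/*
 * Illustrative only: seeding a buffer pool one buffer at a time, retrying
 * while the release command ring is busy. 'buf_addr' and 'bpid' are assumed
 * caller-provided.
 *
 *	struct qbman_release_desc rd;
 *	u64 buf = buf_addr;
 *	int err;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	do {
 *		err = qbman_swp_release(swp, &rd, &buf, 1);
 *	} while (err == -EBUSY);
 */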

struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, or a negative error code if
 * the acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
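
/*
 * Illustrative only: refilling from a buffer pool. The positive return value
 * is the number of buffers actually acquired, which may be fewer than
 * requested when the pool runs low.
 *
 *	u64 bufs[7];
 *	int n;
 *
 *	n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *	if (n < 0)
 *		return n;
 *	// bufs[0..n-1] now hold valid buffer addresses
 */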

struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}

struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}
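
/*
 * Illustrative only: arming a channel for CDAN notifications with a caller
 * context value. 'we_mask' selects which of the context/enable fields the
 * command updates (the write-enable bit definitions are assumed to live in
 * qbman-portal.h and are not reproduced here); 'ctx_cookie' is hypothetical.
 *
 *	err = qbman_swp_CDAN_set(swp, channelid, we_mask, 1, ctx_cookie);
 *	if (err)
 *		pr_err("CDAN setup for channel %d failed: %d\n",
 *		       channelid, err);
 */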

#define QBMAN_RESPONSE_VERB_MASK	0x7f
#define QBMAN_FQ_QUERY_NP		0x45
#define QBMAN_BP_QUERY			0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}
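
/*
 * Illustrative only: sampling FQ occupancy, e.g. for a debug counter.
 *
 *	struct qbman_fq_query_np_rslt state;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &state))
 *		pr_info("fq 0x%x: %u frames, %u bytes\n", fqid,
 *			qbman_fq_state_frame_count(&state),
 *			qbman_fq_state_byte_count(&state));
 */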

struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}
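
/*
 * Illustrative only: checking how many free buffers remain in a pool.
 *
 *	struct qbman_bp_query_rslt bp;
 *
 *	if (!qbman_bp_query(swp, bpid, &bp))
 *		pr_info("bpid %u: %u free buffers\n", bpid,
 *			qbman_bp_info_num_free_bufs(&bp));
 */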