/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL 15
#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
#define IRQNAME "QMan portal %d"
#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT 32
#define QMAN_PIRQ_DQRR_ITHRESH 12
#define QMAN_DQRR_IT_MAX 15
#define QMAN_ITP_MAX 0xFFF
#define QMAN_PIRQ_MR_ITHRESH 4
#define QMAN_PIRQ_IPERIOD 100
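/*
 * The QMAN_PIRQ_* values above are the default interrupt coalescing settings
 * (DQRR/MR interrupt thresholds and interrupt period) programmed into each
 * portal when it is initialised.
 */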

/* Portal register assists */

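/*
 * The portal register offset map differs between ARM/ARM64 and PowerPC
 * platforms, hence the two sets of cache-inhibited and cache-enabled offsets
 * below.
 */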
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH 0x3000
#define QM_REG_EQCR_CI_CINH 0x3040
#define QM_REG_EQCR_ITR 0x3080
#define QM_REG_DQRR_PI_CINH 0x3100
#define QM_REG_DQRR_CI_CINH 0x3140
#define QM_REG_DQRR_ITR 0x3180
#define QM_REG_DQRR_DCAP 0x31C0
#define QM_REG_DQRR_SDQCR 0x3200
#define QM_REG_DQRR_VDQCR 0x3240
#define QM_REG_DQRR_PDQCR 0x3280
#define QM_REG_MR_PI_CINH 0x3300
#define QM_REG_MR_CI_CINH 0x3340
#define QM_REG_MR_ITR 0x3380
#define QM_REG_CFG 0x3500
#define QM_REG_ISR 0x3600
#define QM_REG_IER 0x3640
#define QM_REG_ISDR 0x3680
#define QM_REG_IIR 0x36C0
#define QM_REG_ITPR 0x3740

/* Cache-enabled register offsets */
#define QM_CL_EQCR 0x0000
#define QM_CL_DQRR 0x1000
#define QM_CL_MR 0x2000
#define QM_CL_EQCR_PI_CENA 0x3000
#define QM_CL_EQCR_CI_CENA 0x3040
#define QM_CL_DQRR_PI_CENA 0x3100
#define QM_CL_DQRR_CI_CENA 0x3140
#define QM_CL_MR_PI_CENA 0x3300
#define QM_CL_MR_CI_CENA 0x3340
#define QM_CL_CR 0x3800
#define QM_CL_RR0 0x3900
#define QM_CL_RR1 0x3940

#else
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH 0x0000
#define QM_REG_EQCR_CI_CINH 0x0004
#define QM_REG_EQCR_ITR 0x0008
#define QM_REG_DQRR_PI_CINH 0x0040
#define QM_REG_DQRR_CI_CINH 0x0044
#define QM_REG_DQRR_ITR 0x0048
#define QM_REG_DQRR_DCAP 0x0050
#define QM_REG_DQRR_SDQCR 0x0054
#define QM_REG_DQRR_VDQCR 0x0058
#define QM_REG_DQRR_PDQCR 0x005c
#define QM_REG_MR_PI_CINH 0x0080
#define QM_REG_MR_CI_CINH 0x0084
#define QM_REG_MR_ITR 0x0088
#define QM_REG_CFG 0x0100
#define QM_REG_ISR 0x0e00
#define QM_REG_IER 0x0e04
#define QM_REG_ISDR 0x0e08
#define QM_REG_IIR 0x0e0c
#define QM_REG_ITPR 0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR 0x0000
#define QM_CL_DQRR 0x1000
#define QM_CL_MR 0x2000
#define QM_CL_EQCR_PI_CENA 0x3000
#define QM_CL_EQCR_CI_CENA 0x3100
#define QM_CL_DQRR_PI_CENA 0x3200
#define QM_CL_DQRR_CI_CENA 0x3300
#define QM_CL_MR_PI_CENA 0x3400
#define QM_CL_MR_CI_CENA 0x3500
#define QM_CL_CR 0x3800
#define QM_CL_RR0 0x3900
#define QM_CL_RR1 0x3940
#endif

/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrade performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler treats
 * the portal registers as volatile
 */

/* Cache-enabled ring access */
#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
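/*
 * Each EQCR/DQRR/MR ring entry occupies one 64-byte cacheline, hence the
 * (idx) << 6 above.
 */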

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *     dmode == h/w dequeue mode
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {	/* matches QCSP_CFG::EPM */
        qm_eqcr_pci = 0, /* PI index, cache-inhibited */
        qm_eqcr_pce = 1, /* PI index, cache-enabled */
        qm_eqcr_pvb = 2	 /* valid-bit */
};
enum qm_dqrr_dmode {	/* matches QCSP_CFG::DP */
        qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
        qm_dqrr_dpull = 1  /* PDQCR */
};
enum qm_dqrr_pmode {	/* s/w-only */
        qm_dqrr_pci,	/* reads DQRR_PI_CINH */
        qm_dqrr_pce,	/* reads DQRR_PI_CENA */
        qm_dqrr_pvb	/* reads valid-bit */
};
enum qm_dqrr_cmode {	/* matches QCSP_CFG::DCM */
        qm_dqrr_cci = 0, /* CI index, cache-inhibited */
        qm_dqrr_cce = 1, /* CI index, cache-enabled */
        qm_dqrr_cdc = 2	 /* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {	/* s/w-only */
        qm_mr_pci,	/* reads MR_PI_CINH */
        qm_mr_pce,	/* reads MR_PI_CENA */
        qm_mr_pvb	/* reads valid-bit */
};
enum qm_mr_cmode {	/* matches QCSP_CFG::MM */
        qm_mr_cci = 0,	/* CI index, cache-inhibited */
        qm_mr_cce = 1	/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define QM_EQCR_SIZE 8
#define QM_DQRR_SIZE 16
#define QM_MR_SIZE 8
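/* Ring sizes are in entries; each ring entry is a 64-byte cacheline. */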

/* "Enqueue Command" */
struct qm_eqcr_entry {
        u8 _ncw_verb; /* writes to this are non-coherent */
        u8 dca;
        __be16 seqnum;
        u8 __reserved[4];
        __be32 fqid; /* 24-bit */
        __be32 tag;
        struct qm_fd fd;
        u8 __reserved3[32];
} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT 0x80
#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */

struct qm_eqcr {
        struct qm_eqcr_entry *ring, *cursor;
        u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        u32 busy;
        enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
        const struct qm_dqrr_entry *ring, *cursor;
        u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum qm_dqrr_dmode dmode;
        enum qm_dqrr_pmode pmode;
        enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
        union qm_mr_entry *ring, *cursor;
        u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum qm_mr_pmode pmode;
        enum qm_mr_cmode cmode;
#endif
};

/* MC (Management Command) command */
/* "FQ" command layout */
struct qm_mcc_fq {
        u8 _ncw_verb;
        u8 __reserved1[3];
        __be32 fqid; /* 24-bit */
        u8 __reserved2[56];
} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {
        u8 _ncw_verb;
        u8 __reserved1[30];
        u8 cgid;
        u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
#define QM_MCC_VERB_INITFQ_SCHED 0x41
#define QM_MCC_VERB_QUERYFQ 0x44
#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ 0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
#define QM_MCC_VERB_INITCGR 0x50
#define QM_MCC_VERB_MODIFYCGR 0x51
#define QM_MCC_VERB_CGRTESTWRITE 0x52
#define QM_MCC_VERB_QUERYCGR 0x58
#define QM_MCC_VERB_QUERYCONGESTION 0x59
union qm_mc_command {
        struct {
                u8 _ncw_verb; /* writes to this are non-coherent */
                u8 __reserved[63];
        };
        struct qm_mcc_initfq initfq;
        struct qm_mcc_initcgr initcgr;
        struct qm_mcc_fq fq;
        struct qm_mcc_cgr cgr;
};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
        u8 verb;
        u8 result;
        u8 __reserved1[8];
        struct qm_fqd fqd; /* the FQD fields are here */
        u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
        u8 verb;
        u8 result;
        u8 fqs; /* Frame Queue Status */
        u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID 0x80
#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL 0x00
#define QM_MCR_RESULT_OK 0xf0
#define QM_MCR_RESULT_ERR_FQID 0xf1
#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
#define QM_MCR_RESULT_PENDING 0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
#define QM_MCR_TIMEOUT 10000 /* us */
union qm_mc_result {
        struct {
                u8 verb;
                u8 result;
                u8 __reserved1[62];
        };
        struct qm_mcr_queryfq queryfq;
        struct qm_mcr_alterfq alterfq;
        struct qm_mcr_querycgr querycgr;
        struct qm_mcr_querycongestion querycongestion;
        struct qm_mcr_querywq querywq;
        struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
        union qm_mc_command *cr;
        union qm_mc_result *rr;
        u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum {
                /* Can be _mc_start()ed */
                qman_mc_idle,
                /* Can be _mc_commit()ed or _mc_abort()ed */
                qman_mc_user,
                /* Can only be _mc_retry()ed */
                qman_mc_hw
        } state;
#endif
};

struct qm_addr {
        void *ce;		/* cache-enabled */
        __be32 *ce_be;		/* same value as above but for direct access */
        void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
        /*
         * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
         * and including 'mc' fits within a cacheline (yay!). The 'config' part
         * is setup-only, so isn't a cause for concern. In other words, don't
         * rearrange this structure on a whim, there be dragons ...
         */
        struct qm_addr addr;
        struct qm_eqcr eqcr;
        struct qm_dqrr dqrr;
        struct qm_mr mr;
        struct qm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
        return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
        iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
        dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
        dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
        return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

/* --- EQCR API --- */

#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
        uintptr_t addr = (uintptr_t)p;

        addr &= ~EQCR_CARRY;

        return (struct qm_eqcr_entry *)addr;
}

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
        return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
        /* increment to the next EQCR pointer and handle overflow and 'vbit' */
        struct qm_eqcr_entry *partial = eqcr->cursor + 1;

        eqcr->cursor = eqcr_carryclear(partial);
        if (partial != eqcr->cursor)
                eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
                               enum qm_eqcr_pmode pmode,
                               unsigned int eq_stash_thresh,
                               int eq_stash_prio)
{
        struct qm_eqcr *eqcr = &portal->eqcr;
        u32 cfg;
        u8 pi;

        eqcr->ring = portal->addr.ce + QM_CL_EQCR;
        eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
        qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
        pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
        eqcr->cursor = eqcr->ring + pi;
        eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
                     QM_EQCR_VERB_VBIT : 0;
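        /*
         * One entry is held in reserve so a completely full ring is never
         * indistinguishable from an empty one.
         */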
        eqcr->available = QM_EQCR_SIZE - 1 -
                          dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
        eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
        eqcr->busy = 0;
        eqcr->pmode = pmode;
#endif
        cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
              (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
              (eq_stash_prio << 26) | /* QCSP_CFG: EP */
              ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
        qm_out(portal, QM_REG_CFG, cfg);
        return 0;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
        struct qm_eqcr *eqcr = &portal->eqcr;
        u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
        u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

        DPAA_ASSERT(!eqcr->busy);
        if (pi != eqcr_ptr2idx(eqcr->cursor))
                pr_crit("losing uncommitted EQCR entries\n");
        if (ci != eqcr->ci)
                pr_crit("missing existing EQCR completions\n");
        if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
                pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
                                                           *portal)
{
        struct qm_eqcr *eqcr = &portal->eqcr;

        DPAA_ASSERT(!eqcr->busy);
        if (!eqcr->available)
                return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
        eqcr->busy = 1;
#endif
        dpaa_zero(eqcr->cursor);
        return eqcr->cursor;
}

static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
                                                        *portal)
{
        struct qm_eqcr *eqcr = &portal->eqcr;
        u8 diff, old_ci;

        DPAA_ASSERT(!eqcr->busy);
        if (!eqcr->available) {
                old_ci = eqcr->ci;
                eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
                           (QM_EQCR_SIZE - 1);
                diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
                eqcr->available += diff;
                if (!diff)
                        return NULL;
        }
#ifdef CONFIG_FSL_DPAA_CHECKING
        eqcr->busy = 1;
#endif
        dpaa_zero(eqcr->cursor);
        return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
        DPAA_ASSERT(eqcr->busy);
        DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
        DPAA_ASSERT(eqcr->available >= 1);
}

static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
        struct qm_eqcr *eqcr = &portal->eqcr;
        struct qm_eqcr_entry *eqcursor;

        eqcr_commit_checks(eqcr);
        DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
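        /*
         * Make sure the rest of the enqueue command is visible before the
         * verb (and its valid bit) hands the entry over to hardware.
         */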
        dma_wmb();
        eqcursor = eqcr->cursor;
        eqcursor->_ncw_verb = myverb | eqcr->vbit;
        dpaa_flush(eqcursor);
        eqcr_inc(eqcr);
        eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
        eqcr->busy = 0;
#endif
}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
        qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
        struct qm_eqcr *eqcr = &portal->eqcr;
        u8 diff, old_ci = eqcr->ci;

        eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
        qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
        diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
        eqcr->available += diff;
        return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
        struct qm_eqcr *eqcr = &portal->eqcr;

        eqcr->ithresh = ithresh;
        qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
        struct qm_eqcr *eqcr = &portal->eqcr;

        return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
        struct qm_eqcr *eqcr = &portal->eqcr;

        return QM_EQCR_SIZE - 1 - eqcr->available;
}

/* --- DQRR API --- */

#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

static const struct qm_dqrr_entry *dqrr_carryclear(
                                        const struct qm_dqrr_entry *p)
{
        uintptr_t addr = (uintptr_t)p;

        addr &= ~DQRR_CARRY;

        return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
        return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
        return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
        qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
               ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
                               const struct qm_portal_config *config,
                               enum qm_dqrr_dmode dmode,
                               enum qm_dqrr_pmode pmode,
                               enum qm_dqrr_cmode cmode, u8 max_fill)
{
        struct qm_dqrr *dqrr = &portal->dqrr;
        u32 cfg;

        /* Make sure the DQRR will be idle when we enable */
        qm_out(portal, QM_REG_DQRR_SDQCR, 0);
        qm_out(portal, QM_REG_DQRR_VDQCR, 0);
        qm_out(portal, QM_REG_DQRR_PDQCR, 0);
        dqrr->ring = portal->addr.ce + QM_CL_DQRR;
        dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
        dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
        dqrr->cursor = dqrr->ring + dqrr->ci;
        dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
        dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
                     QM_DQRR_VERB_VBIT : 0;
        dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
        dqrr->dmode = dmode;
        dqrr->pmode = pmode;
        dqrr->cmode = cmode;
#endif
        /* Invalidate every ring entry before beginning */
        for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
                dpaa_invalidate(qm_cl(dqrr->ring, cfg));
        cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
              ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
              ((dmode & 1) << 18) | /* DP */
              ((cmode & 3) << 16) | /* DCM */
              0xa0 | /* RE+SE */
              (0 ? 0x40 : 0) | /* Ignore RP */
              (0 ? 0x10 : 0); /* Ignore SP */
        qm_out(portal, QM_REG_CFG, cfg);
        qm_dqrr_set_maxfill(portal, max_fill);
        return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
        struct qm_dqrr *dqrr = &portal->dqrr;

        if (dqrr->cmode != qm_dqrr_cdc &&
            dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
                pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
                                                struct qm_portal *portal)
{
        struct qm_dqrr *dqrr = &portal->dqrr;

        if (!dqrr->fill)
                return NULL;
        return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
        struct qm_dqrr *dqrr = &portal->dqrr;

        DPAA_ASSERT(dqrr->fill);
        dqrr->cursor = dqrr_inc(dqrr->cursor);
        return --dqrr->fill;
}

static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
        struct qm_dqrr *dqrr = &portal->dqrr;
        struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

        DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
        /*
         * If PAMU is not available we need to invalidate the cache.
         * When PAMU is available the cache is updated by stash
         */
        dpaa_invalidate_touch_ro(res);
#endif
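        /*
         * A verb whose valid-bit matches the expected polarity marks a newly
         * produced entry at the current 'pi' position.
         */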
        if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
                dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
                if (!dqrr->pi)
                        dqrr->vbit ^= QM_DQRR_VERB_VBIT;
                dqrr->fill++;
        }
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
                                            const struct qm_dqrr_entry *dq,
                                            int park)
{
        __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
        int idx = dqrr_ptr2idx(dq);

        DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
        DPAA_ASSERT((dqrr->ring + idx) == dq);
        DPAA_ASSERT(idx < QM_DQRR_SIZE);
        qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
               ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
               idx); /* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
        __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

        DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
        qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
               (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
        qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
        qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{

        if (ithresh > QMAN_DQRR_IT_MAX)
                return -EINVAL;

        qm_out(portal, QM_REG_DQRR_ITR, ithresh);

        return 0;
}

/* --- MR API --- */

#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
        uintptr_t addr = (uintptr_t)p;

        addr &= ~MR_CARRY;

        return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
        return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
        return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
                             enum qm_mr_cmode cmode)
{
        struct qm_mr *mr = &portal->mr;
        u32 cfg;

        mr->ring = portal->addr.ce + QM_CL_MR;
        mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
        mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
        mr->cursor = mr->ring + mr->ci;
        mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
        mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
                   ? QM_MR_VERB_VBIT : 0;
        mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
        mr->pmode = pmode;
        mr->cmode = cmode;
#endif
        cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
              ((cmode & 1) << 8); /* QCSP_CFG:MM */
        qm_out(portal, QM_REG_CFG, cfg);
        return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
        struct qm_mr *mr = &portal->mr;

        if (mr->ci != mr_ptr2idx(mr->cursor))
                pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
        struct qm_mr *mr = &portal->mr;

        if (!mr->fill)
                return NULL;
        return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
        struct qm_mr *mr = &portal->mr;

        DPAA_ASSERT(mr->fill);
        mr->cursor = mr_inc(mr->cursor);
        return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
        struct qm_mr *mr = &portal->mr;
        union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

        DPAA_ASSERT(mr->pmode == qm_mr_pvb);

        if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
                mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
                if (!mr->pi)
                        mr->vbit ^= QM_MR_VERB_VBIT;
                mr->fill++;
                res = mr_inc(res);
        }
        dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
        struct qm_mr *mr = &portal->mr;

        DPAA_ASSERT(mr->cmode == qm_mr_cci);
        mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
        qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
        struct qm_mr *mr = &portal->mr;

        DPAA_ASSERT(mr->cmode == qm_mr_cci);
        mr->ci = mr_ptr2idx(mr->cursor);
        qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
        qm_out(portal, QM_REG_MR_ITR, ithresh);
}

/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{
        u8 rr0, rr1;
        struct qm_mc *mc = &portal->mc;

        mc->cr = portal->addr.ce + QM_CL_CR;
        mc->rr = portal->addr.ce + QM_CL_RR0;
        /*
         * The expected valid bit polarity for the next CR command is 0
         * if RR1 contains a valid response, and is 1 if RR0 contains a
         * valid response. If both RR contain all 0, no command has been
         * executed since reset, in which case the expected valid bit
         * polarity is likewise 1.
         */
868f1c98ee6SRoy Pledge rr0 = mc->rr->verb;
869f1c98ee6SRoy Pledge rr1 = (mc->rr+1)->verb;
870f1c98ee6SRoy Pledge if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
871f1c98ee6SRoy Pledge mc->rridx = 1;
872f1c98ee6SRoy Pledge else
873f1c98ee6SRoy Pledge mc->rridx = 0;
874c535e923SClaudiu Manoil mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
875c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
876c535e923SClaudiu Manoil mc->state = qman_mc_idle;
877c535e923SClaudiu Manoil #endif
878c535e923SClaudiu Manoil return 0;
879c535e923SClaudiu Manoil }
880c535e923SClaudiu Manoil
qm_mc_finish(struct qm_portal * portal)881c535e923SClaudiu Manoil static inline void qm_mc_finish(struct qm_portal *portal)
882c535e923SClaudiu Manoil {
883c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
884c535e923SClaudiu Manoil struct qm_mc *mc = &portal->mc;
885c535e923SClaudiu Manoil
886c535e923SClaudiu Manoil DPAA_ASSERT(mc->state == qman_mc_idle);
887c535e923SClaudiu Manoil if (mc->state != qman_mc_idle)
888c535e923SClaudiu Manoil pr_crit("Losing incomplete MC command\n");
889c535e923SClaudiu Manoil #endif
890c535e923SClaudiu Manoil }
891c535e923SClaudiu Manoil
qm_mc_start(struct qm_portal * portal)892c535e923SClaudiu Manoil static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
893c535e923SClaudiu Manoil {
894c535e923SClaudiu Manoil struct qm_mc *mc = &portal->mc;
895c535e923SClaudiu Manoil
896c535e923SClaudiu Manoil DPAA_ASSERT(mc->state == qman_mc_idle);
897c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
898c535e923SClaudiu Manoil mc->state = qman_mc_user;
899c535e923SClaudiu Manoil #endif
900c535e923SClaudiu Manoil dpaa_zero(mc->cr);
901c535e923SClaudiu Manoil return mc->cr;
902c535e923SClaudiu Manoil }
903c535e923SClaudiu Manoil
qm_mc_commit(struct qm_portal * portal,u8 myverb)904c535e923SClaudiu Manoil static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
905c535e923SClaudiu Manoil {
906c535e923SClaudiu Manoil struct qm_mc *mc = &portal->mc;
907c535e923SClaudiu Manoil union qm_mc_result *rr = mc->rr + mc->rridx;
908c535e923SClaudiu Manoil
909c535e923SClaudiu Manoil DPAA_ASSERT(mc->state == qman_mc_user);
910c535e923SClaudiu Manoil dma_wmb();
911c535e923SClaudiu Manoil mc->cr->_ncw_verb = myverb | mc->vbit;
912c535e923SClaudiu Manoil dpaa_flush(mc->cr);
913c535e923SClaudiu Manoil dpaa_invalidate_touch_ro(rr);
914c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
915c535e923SClaudiu Manoil mc->state = qman_mc_hw;
916c535e923SClaudiu Manoil #endif
917c535e923SClaudiu Manoil }
918c535e923SClaudiu Manoil
qm_mc_result(struct qm_portal * portal)919c535e923SClaudiu Manoil static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
920c535e923SClaudiu Manoil {
921c535e923SClaudiu Manoil struct qm_mc *mc = &portal->mc;
922c535e923SClaudiu Manoil union qm_mc_result *rr = mc->rr + mc->rridx;
923c535e923SClaudiu Manoil
924c535e923SClaudiu Manoil DPAA_ASSERT(mc->state == qman_mc_hw);
925c535e923SClaudiu Manoil /*
926c535e923SClaudiu Manoil * The inactive response register's verb byte always returns zero until
927c535e923SClaudiu Manoil * its command is submitted and completed. This includes the valid-bit,
928c535e923SClaudiu Manoil * in case you were wondering...
929c535e923SClaudiu Manoil */
930e6e2df69SRoy Pledge if (!rr->verb) {
931c535e923SClaudiu Manoil dpaa_invalidate_touch_ro(rr);
932c535e923SClaudiu Manoil return NULL;
933c535e923SClaudiu Manoil }
934c535e923SClaudiu Manoil mc->rridx ^= 1;
935c535e923SClaudiu Manoil mc->vbit ^= QM_MCC_VERB_VBIT;
936c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
937c535e923SClaudiu Manoil mc->state = qman_mc_idle;
938c535e923SClaudiu Manoil #endif
939c535e923SClaudiu Manoil return rr;
940c535e923SClaudiu Manoil }
941c535e923SClaudiu Manoil
qm_mc_result_timeout(struct qm_portal * portal,union qm_mc_result ** mcr)942c535e923SClaudiu Manoil static inline int qm_mc_result_timeout(struct qm_portal *portal,
943c535e923SClaudiu Manoil union qm_mc_result **mcr)
944c535e923SClaudiu Manoil {
945c535e923SClaudiu Manoil int timeout = QM_MCR_TIMEOUT;
946c535e923SClaudiu Manoil
947c535e923SClaudiu Manoil do {
948c535e923SClaudiu Manoil *mcr = qm_mc_result(portal);
949c535e923SClaudiu Manoil if (*mcr)
950c535e923SClaudiu Manoil break;
951c535e923SClaudiu Manoil udelay(1);
952c535e923SClaudiu Manoil } while (--timeout);
953c535e923SClaudiu Manoil
954c535e923SClaudiu Manoil return timeout;
955c535e923SClaudiu Manoil }
956c535e923SClaudiu Manoil
fq_set(struct qman_fq * fq,u32 mask)957c535e923SClaudiu Manoil static inline void fq_set(struct qman_fq *fq, u32 mask)
958c535e923SClaudiu Manoil {
959f5bd2299SMadalin Bucur fq->flags |= mask;
960c535e923SClaudiu Manoil }
961c535e923SClaudiu Manoil
fq_clear(struct qman_fq * fq,u32 mask)962c535e923SClaudiu Manoil static inline void fq_clear(struct qman_fq *fq, u32 mask)
963c535e923SClaudiu Manoil {
964f5bd2299SMadalin Bucur fq->flags &= ~mask;
965c535e923SClaudiu Manoil }
966c535e923SClaudiu Manoil
fq_isset(struct qman_fq * fq,u32 mask)967c535e923SClaudiu Manoil static inline int fq_isset(struct qman_fq *fq, u32 mask)
968c535e923SClaudiu Manoil {
969c535e923SClaudiu Manoil return fq->flags & mask;
970c535e923SClaudiu Manoil }
971c535e923SClaudiu Manoil
fq_isclear(struct qman_fq * fq,u32 mask)972c535e923SClaudiu Manoil static inline int fq_isclear(struct qman_fq *fq, u32 mask)
973c535e923SClaudiu Manoil {
974c535e923SClaudiu Manoil return !(fq->flags & mask);
975c535e923SClaudiu Manoil }
976c535e923SClaudiu Manoil
977c535e923SClaudiu Manoil struct qman_portal {
978c535e923SClaudiu Manoil struct qm_portal p;
979c535e923SClaudiu Manoil /* PORTAL_BITS_*** - dynamic, strictly internal */
980c535e923SClaudiu Manoil unsigned long bits;
981c535e923SClaudiu Manoil /* interrupt sources processed by portal_isr(), configurable */
982c535e923SClaudiu Manoil unsigned long irq_sources;
983c535e923SClaudiu Manoil u32 use_eqcr_ci_stashing;
984c535e923SClaudiu Manoil /* only 1 volatile dequeue at a time */
985c535e923SClaudiu Manoil struct qman_fq *vdqcr_owned;
986c535e923SClaudiu Manoil u32 sdqcr;
987c535e923SClaudiu Manoil /* probing time config params for cpu-affine portals */
988c535e923SClaudiu Manoil const struct qm_portal_config *config;
989c535e923SClaudiu Manoil /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
990c535e923SClaudiu Manoil struct qman_cgrs *cgrs;
991c535e923SClaudiu Manoil /* linked-list of CSCN handlers. */
992c535e923SClaudiu Manoil struct list_head cgr_cbs;
993c535e923SClaudiu Manoil /* list lock */
994*54d26adfSSean Anderson raw_spinlock_t cgr_lock;
995c535e923SClaudiu Manoil struct work_struct congestion_work;
996c535e923SClaudiu Manoil struct work_struct mr_work;
997c535e923SClaudiu Manoil char irqname[MAX_IRQNAME];
998c535e923SClaudiu Manoil };
999c535e923SClaudiu Manoil
1000c535e923SClaudiu Manoil static cpumask_t affine_mask;
1001c535e923SClaudiu Manoil static DEFINE_SPINLOCK(affine_mask_lock);
1002c535e923SClaudiu Manoil static u16 affine_channels[NR_CPUS];
1003c535e923SClaudiu Manoil static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
1004c535e923SClaudiu Manoil struct qman_portal *affine_portals[NR_CPUS];
1005c535e923SClaudiu Manoil
get_affine_portal(void)1006c535e923SClaudiu Manoil static inline struct qman_portal *get_affine_portal(void)
1007c535e923SClaudiu Manoil {
1008c535e923SClaudiu Manoil return &get_cpu_var(qman_affine_portal);
1009c535e923SClaudiu Manoil }
1010c535e923SClaudiu Manoil
1011c535e923SClaudiu Manoil static inline void put_affine_portal(void)
1012c535e923SClaudiu Manoil {
1013c535e923SClaudiu Manoil put_cpu_var(qman_affine_portal);
1014c535e923SClaudiu Manoil }
1015c535e923SClaudiu Manoil
1016e844168aSRoy Pledge
1017e844168aSRoy Pledge static inline struct qman_portal *get_portal_for_channel(u16 channel)
1018e844168aSRoy Pledge {
1019e844168aSRoy Pledge int i;
1020e844168aSRoy Pledge
1021e844168aSRoy Pledge for (i = 0; i < num_possible_cpus(); i++) {
1022e844168aSRoy Pledge if (affine_portals[i] &&
1023e844168aSRoy Pledge affine_portals[i]->config->channel == channel)
1024e844168aSRoy Pledge return affine_portals[i];
1025e844168aSRoy Pledge }
1026e844168aSRoy Pledge
1027e844168aSRoy Pledge return NULL;
1028e844168aSRoy Pledge }
1029e844168aSRoy Pledge
1030c535e923SClaudiu Manoil static struct workqueue_struct *qm_portal_wq;
1031c535e923SClaudiu Manoil
10325c664aceSMadalin Bucur int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
10336d06009cSMadalin Bucur {
10345c664aceSMadalin Bucur int res;
10356d06009cSMadalin Bucur
10365c664aceSMadalin Bucur if (!portal)
10375c664aceSMadalin Bucur return -EINVAL;
10385c664aceSMadalin Bucur
10395c664aceSMadalin Bucur res = qm_dqrr_set_ithresh(&portal->p, ithresh);
10405c664aceSMadalin Bucur if (res)
10415c664aceSMadalin Bucur return res;
10425c664aceSMadalin Bucur
10436d06009cSMadalin Bucur portal->p.dqrr.ithresh = ithresh;
10445c664aceSMadalin Bucur
10455c664aceSMadalin Bucur return 0;
10466d06009cSMadalin Bucur }
10476d06009cSMadalin Bucur EXPORT_SYMBOL(qman_dqrr_set_ithresh);
10486d06009cSMadalin Bucur
10496d06009cSMadalin Bucur void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
10506d06009cSMadalin Bucur {
10516d06009cSMadalin Bucur if (portal && ithresh)
1052830b61baSMadalin Bucur *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
10536d06009cSMadalin Bucur }
10546d06009cSMadalin Bucur EXPORT_SYMBOL(qman_dqrr_get_ithresh);
10556d06009cSMadalin Bucur
10566d06009cSMadalin Bucur void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
10576d06009cSMadalin Bucur {
10586d06009cSMadalin Bucur if (portal && iperiod)
10596d06009cSMadalin Bucur *iperiod = qm_in(&portal->p, QM_REG_ITPR);
10606d06009cSMadalin Bucur }
10616d06009cSMadalin Bucur EXPORT_SYMBOL(qman_portal_get_iperiod);
10626d06009cSMadalin Bucur
10635c664aceSMadalin Bucur int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
10646d06009cSMadalin Bucur {
10655c664aceSMadalin Bucur if (!portal || iperiod > QMAN_ITP_MAX)
10665c664aceSMadalin Bucur return -EINVAL;
10675c664aceSMadalin Bucur
10686d06009cSMadalin Bucur qm_out(&portal->p, QM_REG_ITPR, iperiod);
10695c664aceSMadalin Bucur
10705c664aceSMadalin Bucur return 0;
10716d06009cSMadalin Bucur }
10726d06009cSMadalin Bucur EXPORT_SYMBOL(qman_portal_set_iperiod);
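/*
 * Illustrative sketch (not part of the driver): a portal consumer could wire
 * the four helpers above into an interrupt-coalescing control, e.g. behind
 * ethtool get/set_coalesce. "portal" is whatever affine portal the caller
 * holds; the chosen values and error handling are placeholders.
 *
 *	u8 thresh;
 *	u32 period;
 *
 *	qman_dqrr_get_ithresh(portal, &thresh);
 *	qman_portal_get_iperiod(portal, &period);
 *	// batch more dequeues per interrupt: raise threshold and period
 *	if (qman_dqrr_set_ithresh(portal, QMAN_DQRR_IT_MAX) ||
 *	    qman_portal_set_iperiod(portal, QMAN_ITP_MAX))
 *		pr_warn("interrupt coalescing update rejected\n");
 */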
10736d06009cSMadalin Bucur
1074c535e923SClaudiu Manoil int qman_wq_alloc(void)
1075c535e923SClaudiu Manoil {
1076c535e923SClaudiu Manoil qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
1077c535e923SClaudiu Manoil if (!qm_portal_wq)
1078c535e923SClaudiu Manoil return -ENOMEM;
1079c535e923SClaudiu Manoil return 0;
1080c535e923SClaudiu Manoil }
1081c535e923SClaudiu Manoil
1082ea2b8488SRoy Pledge
1083ea2b8488SRoy Pledge void qman_enable_irqs(void)
1084ea2b8488SRoy Pledge {
1085ea2b8488SRoy Pledge int i;
1086ea2b8488SRoy Pledge
1087ea2b8488SRoy Pledge for (i = 0; i < num_possible_cpus(); i++) {
1088ea2b8488SRoy Pledge if (affine_portals[i]) {
1089ea2b8488SRoy Pledge qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
1090ea2b8488SRoy Pledge qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
1091ea2b8488SRoy Pledge }
1092ea2b8488SRoy Pledge
1093ea2b8488SRoy Pledge }
1094ea2b8488SRoy Pledge }
1095ea2b8488SRoy Pledge
1096c535e923SClaudiu Manoil /*
1097c535e923SClaudiu Manoil * This is what everything can wait on, even if it migrates to a different cpu
1098c535e923SClaudiu Manoil * to the one whose affine portal it is waiting on.
1099c535e923SClaudiu Manoil */
1100c535e923SClaudiu Manoil static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
1101c535e923SClaudiu Manoil
1102c535e923SClaudiu Manoil static struct qman_fq **fq_table;
1103c535e923SClaudiu Manoil static u32 num_fqids;
1104c535e923SClaudiu Manoil
1105c535e923SClaudiu Manoil int qman_alloc_fq_table(u32 _num_fqids)
1106c535e923SClaudiu Manoil {
1107c535e923SClaudiu Manoil num_fqids = _num_fqids;
1108c535e923SClaudiu Manoil
1109fad953ceSKees Cook fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
1110fad953ceSKees Cook num_fqids, 2));
1111c535e923SClaudiu Manoil if (!fq_table)
1112c535e923SClaudiu Manoil return -ENOMEM;
1113c535e923SClaudiu Manoil
1114c535e923SClaudiu Manoil pr_debug("Allocated fq lookup table at %p, entry count %u\n",
1115c535e923SClaudiu Manoil fq_table, num_fqids * 2);
1116c535e923SClaudiu Manoil return 0;
1117c535e923SClaudiu Manoil }
1118c535e923SClaudiu Manoil
1119c535e923SClaudiu Manoil static struct qman_fq *idx_to_fq(u32 idx)
1120c535e923SClaudiu Manoil {
1121c535e923SClaudiu Manoil struct qman_fq *fq;
1122c535e923SClaudiu Manoil
1123c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
1124c535e923SClaudiu Manoil if (WARN_ON(idx >= num_fqids * 2))
1125c535e923SClaudiu Manoil return NULL;
1126c535e923SClaudiu Manoil #endif
1127c535e923SClaudiu Manoil fq = fq_table[idx];
1128c535e923SClaudiu Manoil DPAA_ASSERT(!fq || idx == fq->idx);
1129c535e923SClaudiu Manoil
1130c535e923SClaudiu Manoil return fq;
1131c535e923SClaudiu Manoil }
1132c535e923SClaudiu Manoil
1133c535e923SClaudiu Manoil /*
1134c535e923SClaudiu Manoil * Only returns full-service fq objects, not enqueue-only
1135c535e923SClaudiu Manoil * references (QMAN_FQ_FLAG_NO_MODIFY).
1136c535e923SClaudiu Manoil */
1137c535e923SClaudiu Manoil static struct qman_fq *fqid_to_fq(u32 fqid)
1138c535e923SClaudiu Manoil {
1139c535e923SClaudiu Manoil return idx_to_fq(fqid * 2);
1140c535e923SClaudiu Manoil }
1141c535e923SClaudiu Manoil
1142c535e923SClaudiu Manoil static struct qman_fq *tag_to_fq(u32 tag)
1143c535e923SClaudiu Manoil {
1144c535e923SClaudiu Manoil #if BITS_PER_LONG == 64
1145c535e923SClaudiu Manoil return idx_to_fq(tag);
1146c535e923SClaudiu Manoil #else
1147c535e923SClaudiu Manoil return (struct qman_fq *)tag;
1148c535e923SClaudiu Manoil #endif
1149c535e923SClaudiu Manoil }
1150c535e923SClaudiu Manoil
1151c535e923SClaudiu Manoil static u32 fq_to_tag(struct qman_fq *fq)
1152c535e923SClaudiu Manoil {
1153c535e923SClaudiu Manoil #if BITS_PER_LONG == 64
1154c535e923SClaudiu Manoil return fq->idx;
1155c535e923SClaudiu Manoil #else
1156c535e923SClaudiu Manoil return (u32)fq;
1157c535e923SClaudiu Manoil #endif
1158c535e923SClaudiu Manoil }
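/*
 * To illustrate the lookup scheme above (explanatory note, not driver code):
 * every FQID owns two consecutive fq_table slots. qman_create_fq() below
 * stores a full-service object at index fqid * 2 and an enqueue-only
 * (QMAN_FQ_FLAG_NO_MODIFY) reference at fqid * 2 + 1, which is why
 * fqid_to_fq() can only ever return the former. On 64-bit builds the DQRR/MR
 * "tag" carries this table index; on 32-bit builds it is simply the object
 * pointer, hence the BITS_PER_LONG conditionals.
 *
 *	idx_to_fq(fqid * 2)	// full-service object, if any
 *	idx_to_fq(fqid * 2 + 1)	// enqueue-only reference, if any
 */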
1159c535e923SClaudiu Manoil
1160c535e923SClaudiu Manoil static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1161c535e923SClaudiu Manoil static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1162f84754dbSSebastian Andrzej Siewior unsigned int poll_limit, bool sched_napi);
1163c535e923SClaudiu Manoil static void qm_congestion_task(struct work_struct *work);
1164c535e923SClaudiu Manoil static void qm_mr_process_task(struct work_struct *work);
1165c535e923SClaudiu Manoil
1166c535e923SClaudiu Manoil static irqreturn_t portal_isr(int irq, void *ptr)
1167c535e923SClaudiu Manoil {
1168c535e923SClaudiu Manoil struct qman_portal *p = ptr;
1169c535e923SClaudiu Manoil u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
117089857a8aSMadalin Bucur u32 clear = 0;
1171c535e923SClaudiu Manoil
1172c535e923SClaudiu Manoil if (unlikely(!is))
1173c535e923SClaudiu Manoil return IRQ_NONE;
1174c535e923SClaudiu Manoil
1175c535e923SClaudiu Manoil /* DQRR-handling if it's interrupt-driven */
117689857a8aSMadalin Bucur if (is & QM_PIRQ_DQRI) {
1177f84754dbSSebastian Andrzej Siewior __poll_portal_fast(p, QMAN_POLL_LIMIT, true);
117889857a8aSMadalin Bucur clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
117989857a8aSMadalin Bucur }
1180c535e923SClaudiu Manoil /* Handling of anything else that's interrupt-driven */
118189857a8aSMadalin Bucur clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
1182c535e923SClaudiu Manoil qm_out(&p->p, QM_REG_ISR, clear);
1183c535e923SClaudiu Manoil return IRQ_HANDLED;
1184c535e923SClaudiu Manoil }
1185c535e923SClaudiu Manoil
1186c535e923SClaudiu Manoil static int drain_mr_fqrni(struct qm_portal *p)
1187c535e923SClaudiu Manoil {
1188c535e923SClaudiu Manoil const union qm_mr_entry *msg;
1189c535e923SClaudiu Manoil loop:
1190627da8baSRoy Pledge qm_mr_pvb_update(p);
1191c535e923SClaudiu Manoil msg = qm_mr_current(p);
1192c535e923SClaudiu Manoil if (!msg) {
1193c535e923SClaudiu Manoil /*
1194c535e923SClaudiu Manoil * if MR was full and h/w had other FQRNI entries to produce, we
1195c535e923SClaudiu Manoil * need to allow it time to produce those entries once the
1196c535e923SClaudiu Manoil * existing entries are consumed. A worst-case situation
1197c535e923SClaudiu Manoil * (fully-loaded system) means h/w sequencers may have to do 3-4
1198c535e923SClaudiu Manoil * other things before servicing the portal's MR pump, each of
1199c535e923SClaudiu Manoil * which (if slow) may take ~50 qman cycles (which is ~200
1200c535e923SClaudiu Manoil * processor cycles). So rounding up and then multiplying this
1201c535e923SClaudiu Manoil * worst-case estimate by a factor of 10, just to be
1202c535e923SClaudiu Manoil * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
1203c535e923SClaudiu Manoil * one entry at a time, so h/w has an opportunity to produce new
1204c535e923SClaudiu Manoil * entries well before the ring has been fully consumed, so
1205c535e923SClaudiu Manoil * we're being *really* paranoid here.
1206c535e923SClaudiu Manoil */
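/*
 * Worked out (assuming, purely for illustration, a QMan clock of at
 * least 100 MHz): 10,000 cycles is no more than 100 us, so the 1 ms
 * delay below covers the paranoid worst-case estimate with a wide
 * margin.
 */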
1207627da8baSRoy Pledge mdelay(1);
1208627da8baSRoy Pledge qm_mr_pvb_update(p);
1209c535e923SClaudiu Manoil msg = qm_mr_current(p);
1210c535e923SClaudiu Manoil if (!msg)
1211c535e923SClaudiu Manoil return 0;
1212c535e923SClaudiu Manoil }
1213c535e923SClaudiu Manoil if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
1214c535e923SClaudiu Manoil /* We aren't draining anything but FQRNIs */
1215c535e923SClaudiu Manoil pr_err("Found verb 0x%x in MR\n", msg->verb);
1216c535e923SClaudiu Manoil return -1;
1217c535e923SClaudiu Manoil }
1218c535e923SClaudiu Manoil qm_mr_next(p);
1219c535e923SClaudiu Manoil qm_mr_cci_consume(p, 1);
1220c535e923SClaudiu Manoil goto loop;
1221c535e923SClaudiu Manoil }
1222c535e923SClaudiu Manoil
1223c535e923SClaudiu Manoil static int qman_create_portal(struct qman_portal *portal,
1224c535e923SClaudiu Manoil const struct qm_portal_config *c,
1225c535e923SClaudiu Manoil const struct qman_cgrs *cgrs)
1226c535e923SClaudiu Manoil {
1227c535e923SClaudiu Manoil struct qm_portal *p;
1228c535e923SClaudiu Manoil int ret;
1229c535e923SClaudiu Manoil u32 isdr;
1230c535e923SClaudiu Manoil
1231c535e923SClaudiu Manoil p = &portal->p;
1232c535e923SClaudiu Manoil
1233c535e923SClaudiu Manoil #ifdef CONFIG_FSL_PAMU
1234c535e923SClaudiu Manoil /* PAMU is required for stashing */
1235c535e923SClaudiu Manoil portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
1236c535e923SClaudiu Manoil #else
1237c535e923SClaudiu Manoil portal->use_eqcr_ci_stashing = 0;
1238c535e923SClaudiu Manoil #endif
1239c535e923SClaudiu Manoil /*
1240c535e923SClaudiu Manoil * prep the low-level portal struct with the mapped addresses from the
1241c535e923SClaudiu Manoil * config, everything that follows depends on it and "config" is more
1242c535e923SClaudiu Manoil * for (de)reference
1243c535e923SClaudiu Manoil */
1244e6e2df69SRoy Pledge p->addr.ce = c->addr_virt_ce;
1245e6e2df69SRoy Pledge p->addr.ce_be = c->addr_virt_ce;
1246e6e2df69SRoy Pledge p->addr.ci = c->addr_virt_ci;
1247c535e923SClaudiu Manoil /*
1248c535e923SClaudiu Manoil * If CI-stashing is used, the current defaults use a threshold of 3,
1249c535e923SClaudiu Manoil * and stash with higher-than-DQRR priority.
1250c535e923SClaudiu Manoil */
1251c535e923SClaudiu Manoil if (qm_eqcr_init(p, qm_eqcr_pvb,
1252c535e923SClaudiu Manoil portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
1253c535e923SClaudiu Manoil dev_err(c->dev, "EQCR initialisation failed\n");
1254c535e923SClaudiu Manoil goto fail_eqcr;
1255c535e923SClaudiu Manoil }
1256c535e923SClaudiu Manoil if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
1257c535e923SClaudiu Manoil qm_dqrr_cdc, DQRR_MAXFILL)) {
1258c535e923SClaudiu Manoil dev_err(c->dev, "DQRR initialisation failed\n");
1259c535e923SClaudiu Manoil goto fail_dqrr;
1260c535e923SClaudiu Manoil }
1261c535e923SClaudiu Manoil if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
1262c535e923SClaudiu Manoil dev_err(c->dev, "MR initialisation failed\n");
1263c535e923SClaudiu Manoil goto fail_mr;
1264c535e923SClaudiu Manoil }
1265c535e923SClaudiu Manoil if (qm_mc_init(p)) {
1266c535e923SClaudiu Manoil dev_err(c->dev, "MC initialisation failed\n");
1267c535e923SClaudiu Manoil goto fail_mc;
1268c535e923SClaudiu Manoil }
1269c535e923SClaudiu Manoil /* static interrupt-gating controls */
1270c535e923SClaudiu Manoil qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
1271c535e923SClaudiu Manoil qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
1272c535e923SClaudiu Manoil qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
12736da2ec56SKees Cook portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
1274c535e923SClaudiu Manoil if (!portal->cgrs)
1275c535e923SClaudiu Manoil goto fail_cgrs;
1276c535e923SClaudiu Manoil /* initial snapshot is no-depletion */
1277c535e923SClaudiu Manoil qman_cgrs_init(&portal->cgrs[1]);
1278c535e923SClaudiu Manoil if (cgrs)
1279c535e923SClaudiu Manoil portal->cgrs[0] = *cgrs;
1280c535e923SClaudiu Manoil else
1281c535e923SClaudiu Manoil /* if the given mask is NULL, assume all CGRs can be seen */
1282c535e923SClaudiu Manoil qman_cgrs_fill(&portal->cgrs[0]);
1283c535e923SClaudiu Manoil INIT_LIST_HEAD(&portal->cgr_cbs);
1284*54d26adfSSean Anderson raw_spin_lock_init(&portal->cgr_lock);
1285c535e923SClaudiu Manoil INIT_WORK(&portal->congestion_work, qm_congestion_task);
1286c535e923SClaudiu Manoil INIT_WORK(&portal->mr_work, qm_mr_process_task);
1287c535e923SClaudiu Manoil portal->bits = 0;
1288c535e923SClaudiu Manoil portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
1289c535e923SClaudiu Manoil QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
1290c535e923SClaudiu Manoil QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
1291c535e923SClaudiu Manoil isdr = 0xffffffff;
1292c535e923SClaudiu Manoil qm_out(p, QM_REG_ISDR, isdr);
1293c535e923SClaudiu Manoil portal->irq_sources = 0;
1294c535e923SClaudiu Manoil qm_out(p, QM_REG_IER, 0);
1295c535e923SClaudiu Manoil snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
1296ea2b8488SRoy Pledge qm_out(p, QM_REG_IIR, 1);
1297c535e923SClaudiu Manoil if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
1298c535e923SClaudiu Manoil dev_err(c->dev, "request_irq() failed\n");
1299c535e923SClaudiu Manoil goto fail_irq;
1300c535e923SClaudiu Manoil }
13019beaf661SRoy Pledge
13029beaf661SRoy Pledge if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
1303c535e923SClaudiu Manoil goto fail_affinity;
1304c535e923SClaudiu Manoil
1305c535e923SClaudiu Manoil /* Need EQCR to be empty before continuing */
1306c535e923SClaudiu Manoil isdr &= ~QM_PIRQ_EQCI;
1307c535e923SClaudiu Manoil qm_out(p, QM_REG_ISDR, isdr);
1308c535e923SClaudiu Manoil ret = qm_eqcr_get_fill(p);
1309c535e923SClaudiu Manoil if (ret) {
1310c535e923SClaudiu Manoil dev_err(c->dev, "EQCR unclean\n");
1311c535e923SClaudiu Manoil goto fail_eqcr_empty;
1312c535e923SClaudiu Manoil }
1313c535e923SClaudiu Manoil isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
1314c535e923SClaudiu Manoil qm_out(p, QM_REG_ISDR, isdr);
1315c535e923SClaudiu Manoil if (qm_dqrr_current(p)) {
1316ea2b8488SRoy Pledge dev_dbg(c->dev, "DQRR unclean\n");
1317c535e923SClaudiu Manoil qm_dqrr_cdc_consume_n(p, 0xffff);
1318c535e923SClaudiu Manoil }
1319c535e923SClaudiu Manoil if (qm_mr_current(p) && drain_mr_fqrni(p)) {
1320c535e923SClaudiu Manoil /* special handling, drain just in case it's a few FQRNIs */
1321c535e923SClaudiu Manoil const union qm_mr_entry *e = qm_mr_current(p);
1322c535e923SClaudiu Manoil
1323b6e969dbSClaudiu Manoil dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
1324b6e969dbSClaudiu Manoil e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
1325c535e923SClaudiu Manoil goto fail_dqrr_mr_empty;
1326c535e923SClaudiu Manoil }
1327c535e923SClaudiu Manoil /* Success */
1328c535e923SClaudiu Manoil portal->config = c;
1329ea2b8488SRoy Pledge qm_out(p, QM_REG_ISR, 0xffffffff);
1330c535e923SClaudiu Manoil qm_out(p, QM_REG_ISDR, 0);
1331ea2b8488SRoy Pledge if (!qman_requires_cleanup())
1332c535e923SClaudiu Manoil qm_out(p, QM_REG_IIR, 0);
1333c535e923SClaudiu Manoil /* Write a sane SDQCR */
1334c535e923SClaudiu Manoil qm_dqrr_sdqcr_set(p, portal->sdqcr);
1335c535e923SClaudiu Manoil return 0;
1336c535e923SClaudiu Manoil
1337c535e923SClaudiu Manoil fail_dqrr_mr_empty:
1338c535e923SClaudiu Manoil fail_eqcr_empty:
1339c535e923SClaudiu Manoil fail_affinity:
1340c535e923SClaudiu Manoil free_irq(c->irq, portal);
1341c535e923SClaudiu Manoil fail_irq:
1342c535e923SClaudiu Manoil kfree(portal->cgrs);
1343c535e923SClaudiu Manoil fail_cgrs:
1344c535e923SClaudiu Manoil qm_mc_finish(p);
1345c535e923SClaudiu Manoil fail_mc:
1346c535e923SClaudiu Manoil qm_mr_finish(p);
1347c535e923SClaudiu Manoil fail_mr:
1348c535e923SClaudiu Manoil qm_dqrr_finish(p);
1349c535e923SClaudiu Manoil fail_dqrr:
1350c535e923SClaudiu Manoil qm_eqcr_finish(p);
1351c535e923SClaudiu Manoil fail_eqcr:
1352c535e923SClaudiu Manoil return -EIO;
1353c535e923SClaudiu Manoil }
1354c535e923SClaudiu Manoil
1355c535e923SClaudiu Manoil struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1356c535e923SClaudiu Manoil const struct qman_cgrs *cgrs)
1357c535e923SClaudiu Manoil {
1358c535e923SClaudiu Manoil struct qman_portal *portal;
1359c535e923SClaudiu Manoil int err;
1360c535e923SClaudiu Manoil
1361c535e923SClaudiu Manoil portal = &per_cpu(qman_affine_portal, c->cpu);
1362c535e923SClaudiu Manoil err = qman_create_portal(portal, c, cgrs);
1363c535e923SClaudiu Manoil if (err)
1364c535e923SClaudiu Manoil return NULL;
1365c535e923SClaudiu Manoil
1366c535e923SClaudiu Manoil spin_lock(&affine_mask_lock);
1367c535e923SClaudiu Manoil cpumask_set_cpu(c->cpu, &affine_mask);
1368c535e923SClaudiu Manoil affine_channels[c->cpu] = c->channel;
1369c535e923SClaudiu Manoil affine_portals[c->cpu] = portal;
1370c535e923SClaudiu Manoil spin_unlock(&affine_mask_lock);
1371c535e923SClaudiu Manoil
1372c535e923SClaudiu Manoil return portal;
1373c535e923SClaudiu Manoil }
1374c535e923SClaudiu Manoil
1375c535e923SClaudiu Manoil static void qman_destroy_portal(struct qman_portal *qm)
1376c535e923SClaudiu Manoil {
1377c535e923SClaudiu Manoil const struct qm_portal_config *pcfg;
1378c535e923SClaudiu Manoil
1379c535e923SClaudiu Manoil /* Stop dequeues on the portal */
1380c535e923SClaudiu Manoil qm_dqrr_sdqcr_set(&qm->p, 0);
1381c535e923SClaudiu Manoil
1382c535e923SClaudiu Manoil /*
1383c535e923SClaudiu Manoil * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1384c535e923SClaudiu Manoil * something related to QM_PIRQ_EQCI, this may need fixing.
1385c535e923SClaudiu Manoil * Also, due to the prefetching model used for CI updates in the enqueue
1386c535e923SClaudiu Manoil * path, this update will only invalidate the CI cacheline *after*
1387c535e923SClaudiu Manoil * working on it, so we need to call this twice to ensure a full update
1388c535e923SClaudiu Manoil * irrespective of where the enqueue processing was at when the teardown
1389c535e923SClaudiu Manoil * began.
1390c535e923SClaudiu Manoil */
1391c535e923SClaudiu Manoil qm_eqcr_cce_update(&qm->p);
1392c535e923SClaudiu Manoil qm_eqcr_cce_update(&qm->p);
1393c535e923SClaudiu Manoil pcfg = qm->config;
1394c535e923SClaudiu Manoil
1395c535e923SClaudiu Manoil free_irq(pcfg->irq, qm);
1396c535e923SClaudiu Manoil
1397c535e923SClaudiu Manoil kfree(qm->cgrs);
1398c535e923SClaudiu Manoil qm_mc_finish(&qm->p);
1399c535e923SClaudiu Manoil qm_mr_finish(&qm->p);
1400c535e923SClaudiu Manoil qm_dqrr_finish(&qm->p);
1401c535e923SClaudiu Manoil qm_eqcr_finish(&qm->p);
1402c535e923SClaudiu Manoil
1403c535e923SClaudiu Manoil qm->config = NULL;
1404c535e923SClaudiu Manoil }
1405c535e923SClaudiu Manoil
1406c535e923SClaudiu Manoil const struct qm_portal_config *qman_destroy_affine_portal(void)
1407c535e923SClaudiu Manoil {
1408c535e923SClaudiu Manoil struct qman_portal *qm = get_affine_portal();
1409c535e923SClaudiu Manoil const struct qm_portal_config *pcfg;
1410c535e923SClaudiu Manoil int cpu;
1411c535e923SClaudiu Manoil
1412c535e923SClaudiu Manoil pcfg = qm->config;
1413c535e923SClaudiu Manoil cpu = pcfg->cpu;
1414c535e923SClaudiu Manoil
1415c535e923SClaudiu Manoil qman_destroy_portal(qm);
1416c535e923SClaudiu Manoil
1417c535e923SClaudiu Manoil spin_lock(&affine_mask_lock);
1418c535e923SClaudiu Manoil cpumask_clear_cpu(cpu, &affine_mask);
1419c535e923SClaudiu Manoil spin_unlock(&affine_mask_lock);
1420c535e923SClaudiu Manoil put_affine_portal();
1421c535e923SClaudiu Manoil return pcfg;
1422c535e923SClaudiu Manoil }
1423c535e923SClaudiu Manoil
1424c535e923SClaudiu Manoil /* Inline helper to reduce nesting in __poll_portal_slow() */
1425c535e923SClaudiu Manoil static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
1426c535e923SClaudiu Manoil const union qm_mr_entry *msg, u8 verb)
1427c535e923SClaudiu Manoil {
1428c535e923SClaudiu Manoil switch (verb) {
1429c535e923SClaudiu Manoil case QM_MR_VERB_FQRL:
1430c535e923SClaudiu Manoil DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
1431c535e923SClaudiu Manoil fq_clear(fq, QMAN_FQ_STATE_ORL);
1432c535e923SClaudiu Manoil break;
1433c535e923SClaudiu Manoil case QM_MR_VERB_FQRN:
1434c535e923SClaudiu Manoil DPAA_ASSERT(fq->state == qman_fq_state_parked ||
1435c535e923SClaudiu Manoil fq->state == qman_fq_state_sched);
1436c535e923SClaudiu Manoil DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
1437c535e923SClaudiu Manoil fq_clear(fq, QMAN_FQ_STATE_CHANGING);
1438c535e923SClaudiu Manoil if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
1439c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_NE);
1440c535e923SClaudiu Manoil if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
1441c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_ORL);
1442c535e923SClaudiu Manoil fq->state = qman_fq_state_retired;
1443c535e923SClaudiu Manoil break;
1444c535e923SClaudiu Manoil case QM_MR_VERB_FQPN:
1445c535e923SClaudiu Manoil DPAA_ASSERT(fq->state == qman_fq_state_sched);
1446c535e923SClaudiu Manoil DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
1447c535e923SClaudiu Manoil fq->state = qman_fq_state_parked;
1448c535e923SClaudiu Manoil }
1449c535e923SClaudiu Manoil }
1450c535e923SClaudiu Manoil
1451c535e923SClaudiu Manoil static void qm_congestion_task(struct work_struct *work)
1452c535e923SClaudiu Manoil {
1453c535e923SClaudiu Manoil struct qman_portal *p = container_of(work, struct qman_portal,
1454c535e923SClaudiu Manoil congestion_work);
1455c535e923SClaudiu Manoil struct qman_cgrs rr, c;
1456c535e923SClaudiu Manoil union qm_mc_result *mcr;
1457c535e923SClaudiu Manoil struct qman_cgr *cgr;
1458c535e923SClaudiu Manoil
1459*54d26adfSSean Anderson /*
1460*54d26adfSSean Anderson * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
1461*54d26adfSSean Anderson */
1462*54d26adfSSean Anderson raw_spin_lock_irq(&p->cgr_lock);
1463c535e923SClaudiu Manoil qm_mc_start(&p->p);
1464c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1465c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
1466*54d26adfSSean Anderson raw_spin_unlock_irq(&p->cgr_lock);
1467c535e923SClaudiu Manoil dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
1468e21c7316SRoy Pledge qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1469c535e923SClaudiu Manoil return;
1470c535e923SClaudiu Manoil }
1471c535e923SClaudiu Manoil /* mask out the ones I'm not interested in */
1472c535e923SClaudiu Manoil qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1473c535e923SClaudiu Manoil &p->cgrs[0]);
1474c535e923SClaudiu Manoil /* check previous snapshot for delta, enter/exit congestion */
1475c535e923SClaudiu Manoil qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1476c535e923SClaudiu Manoil /* update snapshot */
1477c535e923SClaudiu Manoil qman_cgrs_cp(&p->cgrs[1], &rr);
1478c535e923SClaudiu Manoil /* Invoke callback */
1479c535e923SClaudiu Manoil list_for_each_entry(cgr, &p->cgr_cbs, node)
1480c535e923SClaudiu Manoil if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1481c535e923SClaudiu Manoil cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1482*54d26adfSSean Anderson raw_spin_unlock_irq(&p->cgr_lock);
1483e21c7316SRoy Pledge qman_p_irqsource_add(p, QM_PIRQ_CSCI);
1484c535e923SClaudiu Manoil }
1485c535e923SClaudiu Manoil
1486c535e923SClaudiu Manoil static void qm_mr_process_task(struct work_struct *work)
1487c535e923SClaudiu Manoil {
1488c535e923SClaudiu Manoil struct qman_portal *p = container_of(work, struct qman_portal,
1489c535e923SClaudiu Manoil mr_work);
1490c535e923SClaudiu Manoil const union qm_mr_entry *msg;
1491c535e923SClaudiu Manoil struct qman_fq *fq;
1492c535e923SClaudiu Manoil u8 verb, num = 0;
1493c535e923SClaudiu Manoil
1494c535e923SClaudiu Manoil preempt_disable();
1495c535e923SClaudiu Manoil
1496c535e923SClaudiu Manoil while (1) {
1497c535e923SClaudiu Manoil qm_mr_pvb_update(&p->p);
1498c535e923SClaudiu Manoil msg = qm_mr_current(&p->p);
1499c535e923SClaudiu Manoil if (!msg)
1500c535e923SClaudiu Manoil break;
1501c535e923SClaudiu Manoil
1502c535e923SClaudiu Manoil verb = msg->verb & QM_MR_VERB_TYPE_MASK;
1503c535e923SClaudiu Manoil /* The message is a software ERN iff the 0x20 bit is clear */
1504c535e923SClaudiu Manoil if (verb & 0x20) {
1505c535e923SClaudiu Manoil switch (verb) {
1506c535e923SClaudiu Manoil case QM_MR_VERB_FQRNI:
1507c535e923SClaudiu Manoil /* nada, we drop FQRNIs on the floor */
1508c535e923SClaudiu Manoil break;
1509c535e923SClaudiu Manoil case QM_MR_VERB_FQRN:
1510c535e923SClaudiu Manoil case QM_MR_VERB_FQRL:
1511c535e923SClaudiu Manoil /* Lookup in the retirement table */
1512d6753c7eSClaudiu Manoil fq = fqid_to_fq(qm_fqid_get(&msg->fq));
1513c535e923SClaudiu Manoil if (WARN_ON(!fq))
1514c535e923SClaudiu Manoil break;
1515c535e923SClaudiu Manoil fq_state_change(p, fq, msg, verb);
1516c535e923SClaudiu Manoil if (fq->cb.fqs)
1517c535e923SClaudiu Manoil fq->cb.fqs(p, fq, msg);
1518c535e923SClaudiu Manoil break;
1519c535e923SClaudiu Manoil case QM_MR_VERB_FQPN:
1520c535e923SClaudiu Manoil /* Parked */
152118058822SClaudiu Manoil fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
1522c535e923SClaudiu Manoil fq_state_change(p, fq, msg, verb);
1523c535e923SClaudiu Manoil if (fq->cb.fqs)
1524c535e923SClaudiu Manoil fq->cb.fqs(p, fq, msg);
1525c535e923SClaudiu Manoil break;
1526c535e923SClaudiu Manoil case QM_MR_VERB_DC_ERN:
1527c535e923SClaudiu Manoil /* DCP ERN */
1528c535e923SClaudiu Manoil pr_crit_once("Leaking DCP ERNs!\n");
1529c535e923SClaudiu Manoil break;
1530c535e923SClaudiu Manoil default:
1531c535e923SClaudiu Manoil pr_crit("Invalid MR verb 0x%02x\n", verb);
1532c535e923SClaudiu Manoil }
1533c535e923SClaudiu Manoil } else {
1534c535e923SClaudiu Manoil /* It's a software ERN */
153518058822SClaudiu Manoil fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
1536c535e923SClaudiu Manoil fq->cb.ern(p, fq, msg);
1537c535e923SClaudiu Manoil }
1538c535e923SClaudiu Manoil num++;
1539c535e923SClaudiu Manoil qm_mr_next(&p->p);
1540c535e923SClaudiu Manoil }
1541c535e923SClaudiu Manoil
1542c535e923SClaudiu Manoil qm_mr_cci_consume(&p->p, num);
1543e21c7316SRoy Pledge qman_p_irqsource_add(p, QM_PIRQ_MRI);
1544c535e923SClaudiu Manoil preempt_enable();
1545c535e923SClaudiu Manoil }
1546c535e923SClaudiu Manoil
1547c535e923SClaudiu Manoil static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
1548c535e923SClaudiu Manoil {
1549c535e923SClaudiu Manoil if (is & QM_PIRQ_CSCI) {
1550e21c7316SRoy Pledge qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
1551c535e923SClaudiu Manoil queue_work_on(smp_processor_id(), qm_portal_wq,
1552c535e923SClaudiu Manoil &p->congestion_work);
1553c535e923SClaudiu Manoil }
1554c535e923SClaudiu Manoil
1555c535e923SClaudiu Manoil if (is & QM_PIRQ_EQRI) {
1556c535e923SClaudiu Manoil qm_eqcr_cce_update(&p->p);
1557c535e923SClaudiu Manoil qm_eqcr_set_ithresh(&p->p, 0);
1558c535e923SClaudiu Manoil wake_up(&affine_queue);
1559c535e923SClaudiu Manoil }
1560c535e923SClaudiu Manoil
1561c535e923SClaudiu Manoil if (is & QM_PIRQ_MRI) {
1562e21c7316SRoy Pledge qman_p_irqsource_remove(p, QM_PIRQ_MRI);
1563c535e923SClaudiu Manoil queue_work_on(smp_processor_id(), qm_portal_wq,
1564c535e923SClaudiu Manoil &p->mr_work);
1565c535e923SClaudiu Manoil }
1566c535e923SClaudiu Manoil
1567c535e923SClaudiu Manoil return is;
1568c535e923SClaudiu Manoil }
1569c535e923SClaudiu Manoil
1570c535e923SClaudiu Manoil /*
1571c535e923SClaudiu Manoil * remove some slowish-path stuff from the "fast path" and make sure it isn't
1572c535e923SClaudiu Manoil * inlined.
1573c535e923SClaudiu Manoil */
1574c535e923SClaudiu Manoil static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
1575c535e923SClaudiu Manoil {
1576c535e923SClaudiu Manoil p->vdqcr_owned = NULL;
1577c535e923SClaudiu Manoil fq_clear(fq, QMAN_FQ_STATE_VDQCR);
1578c535e923SClaudiu Manoil wake_up(&affine_queue);
1579c535e923SClaudiu Manoil }
1580c535e923SClaudiu Manoil
1581c535e923SClaudiu Manoil /*
1582c535e923SClaudiu Manoil * The only states that would conflict with other things if they ran at the
1583c535e923SClaudiu Manoil * same time on the same cpu are:
1584c535e923SClaudiu Manoil *
1585c535e923SClaudiu Manoil * (i) setting/clearing vdqcr_owned, and
1586c535e923SClaudiu Manoil * (ii) clearing the NE (Not Empty) flag.
1587c535e923SClaudiu Manoil *
1588c535e923SClaudiu Manoil * Both are safe, because:
1589c535e923SClaudiu Manoil *
1590c535e923SClaudiu Manoil * (i) this clearing can only occur after qman_volatile_dequeue() has set the
1591c535e923SClaudiu Manoil * vdqcr_owned field (which it does before setting VDQCR), and
1592c535e923SClaudiu Manoil * qman_volatile_dequeue() blocks interrupts and preemption while this is
1593c535e923SClaudiu Manoil * done so that we can't interfere.
1594c535e923SClaudiu Manoil * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
1595c535e923SClaudiu Manoil * with (i) that API prevents us from interfering until it's safe.
1596c535e923SClaudiu Manoil *
1597c535e923SClaudiu Manoil * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
1598c535e923SClaudiu Manoil * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
1599c535e923SClaudiu Manoil * advantage comes from this function not having to "lock" anything at all.
1600c535e923SClaudiu Manoil *
1601c535e923SClaudiu Manoil * Note also that the callbacks are invoked at points which are safe against the
1602c535e923SClaudiu Manoil * above potential conflicts, but that this function itself is not re-entrant
1603c535e923SClaudiu Manoil * (this is because the function tracks one end of each FIFO in the portal and
1604c535e923SClaudiu Manoil * we do *not* want to lock that). So the consequence is that it is safe for
1605c535e923SClaudiu Manoil * user callbacks to call into any QMan API.
1606c535e923SClaudiu Manoil */
1607c535e923SClaudiu Manoil static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1608f84754dbSSebastian Andrzej Siewior unsigned int poll_limit, bool sched_napi)
1609c535e923SClaudiu Manoil {
1610c535e923SClaudiu Manoil const struct qm_dqrr_entry *dq;
1611c535e923SClaudiu Manoil struct qman_fq *fq;
1612c535e923SClaudiu Manoil enum qman_cb_dqrr_result res;
1613c535e923SClaudiu Manoil unsigned int limit = 0;
1614c535e923SClaudiu Manoil
1615c535e923SClaudiu Manoil do {
1616c535e923SClaudiu Manoil qm_dqrr_pvb_update(&p->p);
1617c535e923SClaudiu Manoil dq = qm_dqrr_current(&p->p);
1618c535e923SClaudiu Manoil if (!dq)
1619c535e923SClaudiu Manoil break;
1620c535e923SClaudiu Manoil
1621c535e923SClaudiu Manoil if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1622c535e923SClaudiu Manoil /*
1623efe848cdSClaudiu Manoil * VDQCR: don't trust context_b as the FQ may have
1624c535e923SClaudiu Manoil * been configured for h/w consumption and we're
1625c535e923SClaudiu Manoil * draining it post-retirement.
1626c535e923SClaudiu Manoil */
1627c535e923SClaudiu Manoil fq = p->vdqcr_owned;
1628c535e923SClaudiu Manoil /*
1629c535e923SClaudiu Manoil * We only set QMAN_FQ_STATE_NE when retiring, so we
1630c535e923SClaudiu Manoil * only need to check for clearing it when doing
1631c535e923SClaudiu Manoil * volatile dequeues. It's one less thing to check
1632c535e923SClaudiu Manoil * in the critical path (SDQCR).
1633c535e923SClaudiu Manoil */
1634c535e923SClaudiu Manoil if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1635c535e923SClaudiu Manoil fq_clear(fq, QMAN_FQ_STATE_NE);
1636c535e923SClaudiu Manoil /*
1637c535e923SClaudiu Manoil * This is duplicated from the SDQCR code, but we
1638c535e923SClaudiu Manoil * have stuff to do before *and* after this callback,
1639c535e923SClaudiu Manoil * and we don't want multiple if()s in the critical
1640c535e923SClaudiu Manoil * path (SDQCR).
1641c535e923SClaudiu Manoil */
1642f84754dbSSebastian Andrzej Siewior res = fq->cb.dqrr(p, fq, dq, sched_napi);
1643c535e923SClaudiu Manoil if (res == qman_cb_dqrr_stop)
1644c535e923SClaudiu Manoil break;
1645c535e923SClaudiu Manoil /* Check for VDQCR completion */
1646c535e923SClaudiu Manoil if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1647c535e923SClaudiu Manoil clear_vdqcr(p, fq);
1648c535e923SClaudiu Manoil } else {
1649efe848cdSClaudiu Manoil /* SDQCR: context_b points to the FQ */
165018058822SClaudiu Manoil fq = tag_to_fq(be32_to_cpu(dq->context_b));
1651c535e923SClaudiu Manoil /* Now let the callback do its stuff */
1652f84754dbSSebastian Andrzej Siewior res = fq->cb.dqrr(p, fq, dq, sched_napi);
1653c535e923SClaudiu Manoil /*
1654c535e923SClaudiu Manoil * The callback can request that we exit without
1655c535e923SClaudiu Manoil * consuming this entry or advancing.
1656c535e923SClaudiu Manoil */
1657c535e923SClaudiu Manoil if (res == qman_cb_dqrr_stop)
1658c535e923SClaudiu Manoil break;
1659c535e923SClaudiu Manoil }
1660c535e923SClaudiu Manoil /* Interpret 'dq' from a driver perspective. */
1661c535e923SClaudiu Manoil /*
1662c535e923SClaudiu Manoil * Parking isn't possible unless HELDACTIVE was set. NB,
1663c535e923SClaudiu Manoil * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1664c535e923SClaudiu Manoil * check for HELDACTIVE to cover both.
1665c535e923SClaudiu Manoil */
1666c535e923SClaudiu Manoil DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1667c535e923SClaudiu Manoil (res != qman_cb_dqrr_park));
1668c535e923SClaudiu Manoil /* just means "skip it, I'll consume it myself later on" */
1669c535e923SClaudiu Manoil if (res != qman_cb_dqrr_defer)
1670c535e923SClaudiu Manoil qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1671c535e923SClaudiu Manoil res == qman_cb_dqrr_park);
1672c535e923SClaudiu Manoil /* Move forward */
1673c535e923SClaudiu Manoil qm_dqrr_next(&p->p);
1674c535e923SClaudiu Manoil /*
1675c535e923SClaudiu Manoil * Entry processed and consumed, increment our counter. The
1676c535e923SClaudiu Manoil * callback can request that we exit after consuming the
1677c535e923SClaudiu Manoil * entry, and we also exit if we reach our processing limit,
1678c535e923SClaudiu Manoil * so loop back only if neither of these conditions is met.
1679c535e923SClaudiu Manoil */
1680c535e923SClaudiu Manoil } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1681c535e923SClaudiu Manoil
1682c535e923SClaudiu Manoil return limit;
1683c535e923SClaudiu Manoil }
1684c535e923SClaudiu Manoil
1685c535e923SClaudiu Manoil void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1686c535e923SClaudiu Manoil {
1687c535e923SClaudiu Manoil unsigned long irqflags;
1688c535e923SClaudiu Manoil
1689c535e923SClaudiu Manoil local_irq_save(irqflags);
1690f5bd2299SMadalin Bucur p->irq_sources |= bits & QM_PIRQ_VISIBLE;
1691c535e923SClaudiu Manoil qm_out(&p->p, QM_REG_IER, p->irq_sources);
1692c535e923SClaudiu Manoil local_irq_restore(irqflags);
1693c535e923SClaudiu Manoil }
1694c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_p_irqsource_add);
1695c535e923SClaudiu Manoil
1696c535e923SClaudiu Manoil void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
1697c535e923SClaudiu Manoil {
1698c535e923SClaudiu Manoil unsigned long irqflags;
1699c535e923SClaudiu Manoil u32 ier;
1700c535e923SClaudiu Manoil
1701c535e923SClaudiu Manoil /*
1702c535e923SClaudiu Manoil * Our interrupt handler only processes+clears status register bits that
1703c535e923SClaudiu Manoil * are in p->irq_sources. As we're trimming that mask, if one of them
1704c535e923SClaudiu Manoil * were to assert in the status register just before we remove it from
1705c535e923SClaudiu Manoil * the enable register, there would be an interrupt-storm when we
1706c535e923SClaudiu Manoil * release the IRQ lock. So we wait for the enable register update to
1707c535e923SClaudiu Manoil * take effect in h/w (by reading it back) and then clear all other bits
1708c535e923SClaudiu Manoil * in the status register. Ie. we clear them from ISR once it's certain
1709c535e923SClaudiu Manoil * IER won't allow them to reassert.
1710c535e923SClaudiu Manoil */
1711c535e923SClaudiu Manoil local_irq_save(irqflags);
1712c535e923SClaudiu Manoil bits &= QM_PIRQ_VISIBLE;
1713f5bd2299SMadalin Bucur p->irq_sources &= ~bits;
1714c535e923SClaudiu Manoil qm_out(&p->p, QM_REG_IER, p->irq_sources);
1715c535e923SClaudiu Manoil ier = qm_in(&p->p, QM_REG_IER);
1716c535e923SClaudiu Manoil /*
1717c535e923SClaudiu Manoil * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1718c535e923SClaudiu Manoil * data-dependency, ie. to protect against re-ordering.
1719c535e923SClaudiu Manoil */
1720c535e923SClaudiu Manoil qm_out(&p->p, QM_REG_ISR, ~ier);
1721c535e923SClaudiu Manoil local_irq_restore(irqflags);
1722c535e923SClaudiu Manoil }
1723c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_p_irqsource_remove);
1724c535e923SClaudiu Manoil
1725c535e923SClaudiu Manoil const cpumask_t *qman_affine_cpus(void)
1726c535e923SClaudiu Manoil {
1727c535e923SClaudiu Manoil return &affine_mask;
1728c535e923SClaudiu Manoil }
1729c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_affine_cpus);
1730c535e923SClaudiu Manoil
1731c535e923SClaudiu Manoil u16 qman_affine_channel(int cpu)
1732c535e923SClaudiu Manoil {
1733c535e923SClaudiu Manoil if (cpu < 0) {
1734c535e923SClaudiu Manoil struct qman_portal *portal = get_affine_portal();
1735c535e923SClaudiu Manoil
1736c535e923SClaudiu Manoil cpu = portal->config->cpu;
1737c535e923SClaudiu Manoil put_affine_portal();
1738c535e923SClaudiu Manoil }
1739c535e923SClaudiu Manoil WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
1740c535e923SClaudiu Manoil return affine_channels[cpu];
1741c535e923SClaudiu Manoil }
1742c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_affine_channel);
1743c535e923SClaudiu Manoil
1744c535e923SClaudiu Manoil struct qman_portal *qman_get_affine_portal(int cpu)
1745c535e923SClaudiu Manoil {
1746c535e923SClaudiu Manoil return affine_portals[cpu];
1747c535e923SClaudiu Manoil }
1748c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_get_affine_portal);
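/*
 * Illustrative only: a consumer typically walks the affine portals exposed by
 * the three helpers above to spread its frame queues across the per-cpu
 * channels. The loop body is a placeholder.
 *
 *	const cpumask_t *cpus = qman_affine_cpus();
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpus) {
 *		u16 channel = qman_affine_channel(cpu);
 *		struct qman_portal *p = qman_get_affine_portal(cpu);
 *		// e.g. remember (cpu, channel, p) for later FQ initialisation
 *	}
 */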
1749c535e923SClaudiu Manoil
1750a2d00f3dSMadalin Bucur int qman_start_using_portal(struct qman_portal *p, struct device *dev)
1751a2d00f3dSMadalin Bucur {
1752a2d00f3dSMadalin Bucur return (!device_link_add(dev, p->config->dev,
1753a2d00f3dSMadalin Bucur DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
1754a2d00f3dSMadalin Bucur }
1755a2d00f3dSMadalin Bucur EXPORT_SYMBOL(qman_start_using_portal);
1756a2d00f3dSMadalin Bucur
1757c535e923SClaudiu Manoil int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
1758c535e923SClaudiu Manoil {
1759f84754dbSSebastian Andrzej Siewior return __poll_portal_fast(p, limit, false);
1760c535e923SClaudiu Manoil }
1761c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_p_poll_dqrr);
1762c535e923SClaudiu Manoil
1763c535e923SClaudiu Manoil void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1764c535e923SClaudiu Manoil {
1765c535e923SClaudiu Manoil unsigned long irqflags;
1766c535e923SClaudiu Manoil
1767c535e923SClaudiu Manoil local_irq_save(irqflags);
1768c535e923SClaudiu Manoil pools &= p->config->pools;
1769c535e923SClaudiu Manoil p->sdqcr |= pools;
1770c535e923SClaudiu Manoil qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1771c535e923SClaudiu Manoil local_irq_restore(irqflags);
1772c535e923SClaudiu Manoil }
1773c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_p_static_dequeue_add);
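/*
 * Illustrative sketch of a purely polled consumer built on the two exports
 * above ("pools", "stop_polling" and the budget of 16 are placeholders, and
 * the caller is assumed to stay on one cpu):
 *
 *	struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
 *
 *	qman_p_static_dequeue_add(p, pools);
 *	while (!stop_polling)
 *		qman_p_poll_dqrr(p, 16);	// handle up to 16 DQRR entries
 */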
1774c535e923SClaudiu Manoil
1775c535e923SClaudiu Manoil /* Frame queue API */
1776c535e923SClaudiu Manoil
1777c535e923SClaudiu Manoil static const char *mcr_result_str(u8 result)
1778c535e923SClaudiu Manoil {
1779c535e923SClaudiu Manoil switch (result) {
1780c535e923SClaudiu Manoil case QM_MCR_RESULT_NULL:
1781c535e923SClaudiu Manoil return "QM_MCR_RESULT_NULL";
1782c535e923SClaudiu Manoil case QM_MCR_RESULT_OK:
1783c535e923SClaudiu Manoil return "QM_MCR_RESULT_OK";
1784c535e923SClaudiu Manoil case QM_MCR_RESULT_ERR_FQID:
1785c535e923SClaudiu Manoil return "QM_MCR_RESULT_ERR_FQID";
1786c535e923SClaudiu Manoil case QM_MCR_RESULT_ERR_FQSTATE:
1787c535e923SClaudiu Manoil return "QM_MCR_RESULT_ERR_FQSTATE";
1788c535e923SClaudiu Manoil case QM_MCR_RESULT_ERR_NOTEMPTY:
1789c535e923SClaudiu Manoil return "QM_MCR_RESULT_ERR_NOTEMPTY";
1790c535e923SClaudiu Manoil case QM_MCR_RESULT_PENDING:
1791c535e923SClaudiu Manoil return "QM_MCR_RESULT_PENDING";
1792c535e923SClaudiu Manoil case QM_MCR_RESULT_ERR_BADCOMMAND:
1793c535e923SClaudiu Manoil return "QM_MCR_RESULT_ERR_BADCOMMAND";
1794c535e923SClaudiu Manoil }
1795c535e923SClaudiu Manoil return "<unknown MCR result>";
1796c535e923SClaudiu Manoil }
1797c535e923SClaudiu Manoil
1798c535e923SClaudiu Manoil int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1799c535e923SClaudiu Manoil {
1800c535e923SClaudiu Manoil if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1801c535e923SClaudiu Manoil int ret = qman_alloc_fqid(&fqid);
1802c535e923SClaudiu Manoil
1803c535e923SClaudiu Manoil if (ret)
1804c535e923SClaudiu Manoil return ret;
1805c535e923SClaudiu Manoil }
1806c535e923SClaudiu Manoil fq->fqid = fqid;
1807c535e923SClaudiu Manoil fq->flags = flags;
1808c535e923SClaudiu Manoil fq->state = qman_fq_state_oos;
1809c535e923SClaudiu Manoil fq->cgr_groupid = 0;
1810c535e923SClaudiu Manoil
1811c535e923SClaudiu Manoil /* A context_b of 0 is allegedly special, so don't use that fqid */
1812c535e923SClaudiu Manoil if (fqid == 0 || fqid >= num_fqids) {
1813c535e923SClaudiu Manoil WARN(1, "bad fqid %d\n", fqid);
1814c535e923SClaudiu Manoil return -EINVAL;
1815c535e923SClaudiu Manoil }
1816c535e923SClaudiu Manoil
1817c535e923SClaudiu Manoil fq->idx = fqid * 2;
1818c535e923SClaudiu Manoil if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1819c535e923SClaudiu Manoil fq->idx++;
1820c535e923SClaudiu Manoil
1821c535e923SClaudiu Manoil WARN_ON(fq_table[fq->idx]);
1822c535e923SClaudiu Manoil fq_table[fq->idx] = fq;
1823c535e923SClaudiu Manoil
1824c535e923SClaudiu Manoil return 0;
1825c535e923SClaudiu Manoil }
1826c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_create_fq);
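/*
 * Sketch of the usual frame queue lifecycle built on this API (illustrative
 * only: "my_fq", "my_dqrr" and "my_ern" are placeholders, error handling is
 * elided, and the qman_fq object must stay alive for the FQ's whole life):
 *
 *	struct qm_mcc_initfq opts = { };
 *
 *	my_fq.cb.dqrr = my_dqrr;
 *	my_fq.cb.ern = my_ern;
 *	qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
 *	qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 *	...
 *	qman_retire_fq(&my_fq, NULL);	// returns 1 if retirement is pending
 *	qman_oos_fq(&my_fq);
 *	qman_destroy_fq(&my_fq);
 */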
1827c535e923SClaudiu Manoil
1828c535e923SClaudiu Manoil void qman_destroy_fq(struct qman_fq *fq)
1829c535e923SClaudiu Manoil {
1830c535e923SClaudiu Manoil /*
1831c535e923SClaudiu Manoil * We don't need to lock the FQ as it is a pre-condition that the FQ be
1832c535e923SClaudiu Manoil * quiesced. Instead, run some checks.
1833c535e923SClaudiu Manoil */
1834c535e923SClaudiu Manoil switch (fq->state) {
1835c535e923SClaudiu Manoil case qman_fq_state_parked:
1836c535e923SClaudiu Manoil case qman_fq_state_oos:
1837c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1838c535e923SClaudiu Manoil qman_release_fqid(fq->fqid);
1839c535e923SClaudiu Manoil
1840c535e923SClaudiu Manoil DPAA_ASSERT(fq_table[fq->idx]);
1841c535e923SClaudiu Manoil fq_table[fq->idx] = NULL;
1842c535e923SClaudiu Manoil return;
1843c535e923SClaudiu Manoil default:
1844c535e923SClaudiu Manoil break;
1845c535e923SClaudiu Manoil }
1846c535e923SClaudiu Manoil DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1847c535e923SClaudiu Manoil }
1848c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_destroy_fq);
1849c535e923SClaudiu Manoil
1850c535e923SClaudiu Manoil u32 qman_fq_fqid(struct qman_fq *fq)
1851c535e923SClaudiu Manoil {
1852c535e923SClaudiu Manoil return fq->fqid;
1853c535e923SClaudiu Manoil }
1854c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_fq_fqid);
1855c535e923SClaudiu Manoil
1856c535e923SClaudiu Manoil int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1857c535e923SClaudiu Manoil {
1858c535e923SClaudiu Manoil union qm_mc_command *mcc;
1859c535e923SClaudiu Manoil union qm_mc_result *mcr;
1860c535e923SClaudiu Manoil struct qman_portal *p;
1861c535e923SClaudiu Manoil u8 res, myverb;
1862c535e923SClaudiu Manoil int ret = 0;
1863c535e923SClaudiu Manoil
1864c535e923SClaudiu Manoil myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
1865c535e923SClaudiu Manoil ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1866c535e923SClaudiu Manoil
1867c535e923SClaudiu Manoil if (fq->state != qman_fq_state_oos &&
1868c535e923SClaudiu Manoil fq->state != qman_fq_state_parked)
1869c535e923SClaudiu Manoil return -EINVAL;
1870c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
1871c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1872c535e923SClaudiu Manoil return -EINVAL;
1873c535e923SClaudiu Manoil #endif
187418058822SClaudiu Manoil if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
1875c535e923SClaudiu Manoil /* And can't be set at the same time as TDTHRESH */
187618058822SClaudiu Manoil if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
1877c535e923SClaudiu Manoil return -EINVAL;
1878c535e923SClaudiu Manoil }
1879c535e923SClaudiu Manoil /* Issue an INITFQ_[PARKED|SCHED] management command */
1880c535e923SClaudiu Manoil p = get_affine_portal();
1881c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1882c535e923SClaudiu Manoil (fq->state != qman_fq_state_oos &&
1883c535e923SClaudiu Manoil fq->state != qman_fq_state_parked)) {
1884c535e923SClaudiu Manoil ret = -EBUSY;
1885c535e923SClaudiu Manoil goto out;
1886c535e923SClaudiu Manoil }
1887c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
1888c535e923SClaudiu Manoil if (opts)
1889c535e923SClaudiu Manoil mcc->initfq = *opts;
1890d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fq->fqid);
1891c535e923SClaudiu Manoil mcc->initfq.count = 0;
1892c535e923SClaudiu Manoil /*
1893efe848cdSClaudiu Manoil * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1894c535e923SClaudiu Manoil * demux pointer. Otherwise, the caller-provided value is allowed to
1895c535e923SClaudiu Manoil * stand, don't overwrite it.
1896c535e923SClaudiu Manoil */
1897c535e923SClaudiu Manoil if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1898c535e923SClaudiu Manoil dma_addr_t phys_fq;
1899c535e923SClaudiu Manoil
190018058822SClaudiu Manoil mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
190118058822SClaudiu Manoil mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
1902c535e923SClaudiu Manoil /*
1903c535e923SClaudiu Manoil * and the physical address - NB, if the user wasn't trying to
1904c535e923SClaudiu Manoil * set CONTEXTA, clear the stashing settings.
1905c535e923SClaudiu Manoil */
190618058822SClaudiu Manoil if (!(be16_to_cpu(mcc->initfq.we_mask) &
190718058822SClaudiu Manoil QM_INITFQ_WE_CONTEXTA)) {
190818058822SClaudiu Manoil mcc->initfq.we_mask |=
190918058822SClaudiu Manoil cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1910c535e923SClaudiu Manoil memset(&mcc->initfq.fqd.context_a, 0,
1911c535e923SClaudiu Manoil sizeof(mcc->initfq.fqd.context_a));
1912c535e923SClaudiu Manoil } else {
19130fbeac3bSClaudiu Manoil struct qman_portal *p = qman_dma_portal;
19140fbeac3bSClaudiu Manoil
19150fbeac3bSClaudiu Manoil phys_fq = dma_map_single(p->config->dev, fq,
19160fbeac3bSClaudiu Manoil sizeof(*fq), DMA_TO_DEVICE);
19170fbeac3bSClaudiu Manoil if (dma_mapping_error(p->config->dev, phys_fq)) {
19180fbeac3bSClaudiu Manoil dev_err(p->config->dev, "dma_mapping failed\n");
19190fbeac3bSClaudiu Manoil ret = -EIO;
19200fbeac3bSClaudiu Manoil goto out;
19210fbeac3bSClaudiu Manoil }
19220fbeac3bSClaudiu Manoil
1923c535e923SClaudiu Manoil qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1924c535e923SClaudiu Manoil }
1925c535e923SClaudiu Manoil }
1926c535e923SClaudiu Manoil if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1927c535e923SClaudiu Manoil int wq = 0;
1928c535e923SClaudiu Manoil
192918058822SClaudiu Manoil if (!(be16_to_cpu(mcc->initfq.we_mask) &
193018058822SClaudiu Manoil QM_INITFQ_WE_DESTWQ)) {
193118058822SClaudiu Manoil mcc->initfq.we_mask |=
193218058822SClaudiu Manoil cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1933c535e923SClaudiu Manoil wq = 4;
1934c535e923SClaudiu Manoil }
1935c535e923SClaudiu Manoil qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
1936c535e923SClaudiu Manoil }
1937c535e923SClaudiu Manoil qm_mc_commit(&p->p, myverb);
1938c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
1939c535e923SClaudiu Manoil dev_err(p->config->dev, "MCR timeout\n");
1940c535e923SClaudiu Manoil ret = -ETIMEDOUT;
1941c535e923SClaudiu Manoil goto out;
1942c535e923SClaudiu Manoil }
1943c535e923SClaudiu Manoil
1944c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1945c535e923SClaudiu Manoil res = mcr->result;
1946c535e923SClaudiu Manoil if (res != QM_MCR_RESULT_OK) {
1947c535e923SClaudiu Manoil ret = -EIO;
1948c535e923SClaudiu Manoil goto out;
1949c535e923SClaudiu Manoil }
1950c535e923SClaudiu Manoil if (opts) {
195118058822SClaudiu Manoil if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
195218058822SClaudiu Manoil if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
1953c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1954c535e923SClaudiu Manoil else
1955c535e923SClaudiu Manoil fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1956c535e923SClaudiu Manoil }
195718058822SClaudiu Manoil if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
1958c535e923SClaudiu Manoil fq->cgr_groupid = opts->fqd.cgid;
1959c535e923SClaudiu Manoil }
1960c535e923SClaudiu Manoil fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1961c535e923SClaudiu Manoil qman_fq_state_sched : qman_fq_state_parked;
1962c535e923SClaudiu Manoil
1963c535e923SClaudiu Manoil out:
1964c535e923SClaudiu Manoil put_affine_portal();
1965c535e923SClaudiu Manoil return ret;
1966c535e923SClaudiu Manoil }
1967c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_init_fq);
1968c535e923SClaudiu Manoil
1969c535e923SClaudiu Manoil int qman_schedule_fq(struct qman_fq *fq)
1970c535e923SClaudiu Manoil {
1971c535e923SClaudiu Manoil union qm_mc_command *mcc;
1972c535e923SClaudiu Manoil union qm_mc_result *mcr;
1973c535e923SClaudiu Manoil struct qman_portal *p;
1974c535e923SClaudiu Manoil int ret = 0;
1975c535e923SClaudiu Manoil
1976c535e923SClaudiu Manoil if (fq->state != qman_fq_state_parked)
1977c535e923SClaudiu Manoil return -EINVAL;
1978c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
1979c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1980c535e923SClaudiu Manoil return -EINVAL;
1981c535e923SClaudiu Manoil #endif
1982c535e923SClaudiu Manoil /* Issue an ALTERFQ_SCHED management command */
1983c535e923SClaudiu Manoil p = get_affine_portal();
1984c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1985c535e923SClaudiu Manoil fq->state != qman_fq_state_parked) {
1986c535e923SClaudiu Manoil ret = -EBUSY;
1987c535e923SClaudiu Manoil goto out;
1988c535e923SClaudiu Manoil }
1989c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
1990d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fq->fqid);
1991c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1992c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
1993c535e923SClaudiu Manoil dev_err(p->config->dev, "ALTER_SCHED timeout\n");
1994c535e923SClaudiu Manoil ret = -ETIMEDOUT;
1995c535e923SClaudiu Manoil goto out;
1996c535e923SClaudiu Manoil }
1997c535e923SClaudiu Manoil
1998c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1999c535e923SClaudiu Manoil if (mcr->result != QM_MCR_RESULT_OK) {
2000c535e923SClaudiu Manoil ret = -EIO;
2001c535e923SClaudiu Manoil goto out;
2002c535e923SClaudiu Manoil }
2003c535e923SClaudiu Manoil fq->state = qman_fq_state_sched;
2004c535e923SClaudiu Manoil out:
2005c535e923SClaudiu Manoil put_affine_portal();
2006c535e923SClaudiu Manoil return ret;
2007c535e923SClaudiu Manoil }
2008c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_schedule_fq);
2009c535e923SClaudiu Manoil
2010c535e923SClaudiu Manoil int qman_retire_fq(struct qman_fq *fq, u32 *flags)
2011c535e923SClaudiu Manoil {
2012c535e923SClaudiu Manoil union qm_mc_command *mcc;
2013c535e923SClaudiu Manoil union qm_mc_result *mcr;
2014c535e923SClaudiu Manoil struct qman_portal *p;
2015c535e923SClaudiu Manoil int ret;
2016c535e923SClaudiu Manoil u8 res;
2017c535e923SClaudiu Manoil
2018c535e923SClaudiu Manoil if (fq->state != qman_fq_state_parked &&
2019c535e923SClaudiu Manoil fq->state != qman_fq_state_sched)
2020c535e923SClaudiu Manoil return -EINVAL;
2021c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
2022c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
2023c535e923SClaudiu Manoil return -EINVAL;
2024c535e923SClaudiu Manoil #endif
2025c535e923SClaudiu Manoil p = get_affine_portal();
2026c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
2027c535e923SClaudiu Manoil fq->state == qman_fq_state_retired ||
2028c535e923SClaudiu Manoil fq->state == qman_fq_state_oos) {
2029c535e923SClaudiu Manoil ret = -EBUSY;
2030c535e923SClaudiu Manoil goto out;
2031c535e923SClaudiu Manoil }
2032c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2033d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fq->fqid);
2034c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
2035c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2036c535e923SClaudiu Manoil dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
2037c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2038c535e923SClaudiu Manoil goto out;
2039c535e923SClaudiu Manoil }
2040c535e923SClaudiu Manoil
2041c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
2042c535e923SClaudiu Manoil res = mcr->result;
2043c535e923SClaudiu Manoil /*
2044c535e923SClaudiu Manoil * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
2045c535e923SClaudiu Manoil * and defer the flags until FQRNI or FQRN (respectively) show up. But
2046c535e923SClaudiu Manoil * "Friendly" is to process OK immediately, and not set CHANGING. We do
2047c535e923SClaudiu Manoil * friendly, otherwise the caller doesn't necessarily have a fully
2048c535e923SClaudiu Manoil * "retired" FQ on return even if the retirement was immediate. However
2049c535e923SClaudiu Manoil * this does mean some code duplication between here and
2050c535e923SClaudiu Manoil * fq_state_change().
2051c535e923SClaudiu Manoil */
2052c535e923SClaudiu Manoil if (res == QM_MCR_RESULT_OK) {
2053c535e923SClaudiu Manoil ret = 0;
2054c535e923SClaudiu Manoil /* Process 'fq' right away, we'll ignore FQRNI */
2055c535e923SClaudiu Manoil if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
2056c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_NE);
2057c535e923SClaudiu Manoil if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
2058c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_ORL);
2059c535e923SClaudiu Manoil if (flags)
2060c535e923SClaudiu Manoil *flags = fq->flags;
2061c535e923SClaudiu Manoil fq->state = qman_fq_state_retired;
2062c535e923SClaudiu Manoil if (fq->cb.fqs) {
2063c535e923SClaudiu Manoil /*
2064c535e923SClaudiu Manoil * Another issue with supporting "immediate" retirement
2065c535e923SClaudiu Manoil * is that we're forced to drop FQRNIs, because by the
2066c535e923SClaudiu Manoil * time they're seen it may already be "too late" (the
2067c535e923SClaudiu Manoil * fq may have been OOS'd and free()'d already). But if
2068c535e923SClaudiu Manoil * the upper layer wants a callback whether it's
2069c535e923SClaudiu Manoil * immediate or not, we have to fake a "MR" entry to
2070c535e923SClaudiu Manoil * look like an FQRNI...
2071c535e923SClaudiu Manoil */
2072c535e923SClaudiu Manoil union qm_mr_entry msg;
2073c535e923SClaudiu Manoil
2074c535e923SClaudiu Manoil msg.verb = QM_MR_VERB_FQRNI;
2075c535e923SClaudiu Manoil msg.fq.fqs = mcr->alterfq.fqs;
2076d6753c7eSClaudiu Manoil qm_fqid_set(&msg.fq, fq->fqid);
207718058822SClaudiu Manoil msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
2078c535e923SClaudiu Manoil fq->cb.fqs(p, fq, &msg);
2079c535e923SClaudiu Manoil }
2080c535e923SClaudiu Manoil } else if (res == QM_MCR_RESULT_PENDING) {
2081c535e923SClaudiu Manoil ret = 1;
2082c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_CHANGING);
2083c535e923SClaudiu Manoil } else {
2084c535e923SClaudiu Manoil ret = -EIO;
2085c535e923SClaudiu Manoil }
2086c535e923SClaudiu Manoil out:
2087c535e923SClaudiu Manoil put_affine_portal();
2088c535e923SClaudiu Manoil return ret;
2089c535e923SClaudiu Manoil }
2090c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_retire_fq);
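
/*
 * Illustrative sketch, not part of the driver: one way a caller might drive
 * the retirement sequence described above. A return of 0 means the FQ is
 * already retired (any FQRNI is consumed on the caller's behalf), while 1
 * means retirement is pending and completion is signalled through fq->cb.fqs
 * with an FQRN message. Everything prefixed "example_" is hypothetical, and
 * the completion-based wait assumes <linux/completion.h> is already pulled in
 * by the existing includes.
 */
static DECLARE_COMPLETION(example_retire_done);

static void example_fqs_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	/* FQRN/FQRNI for this FQ: retirement has completed */
	complete(&example_retire_done);
}

static int __maybe_unused example_retire_and_oos(struct qman_fq *fq)
{
	u32 flags;
	int ret;

	fq->cb.fqs = example_fqs_cb;
	ret = qman_retire_fq(fq, &flags);
	if (ret < 0)
		return ret;
	if (ret == 1)
		/* ALTER_RETIRE returned PENDING; wait for the FQRN */
		wait_for_completion(&example_retire_done);
	/* The FQ is now retired and may be taken out of service */
	return qman_oos_fq(fq);
}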
2091c535e923SClaudiu Manoil
2092c535e923SClaudiu Manoil int qman_oos_fq(struct qman_fq *fq)
2093c535e923SClaudiu Manoil {
2094c535e923SClaudiu Manoil union qm_mc_command *mcc;
2095c535e923SClaudiu Manoil union qm_mc_result *mcr;
2096c535e923SClaudiu Manoil struct qman_portal *p;
2097c535e923SClaudiu Manoil int ret = 0;
2098c535e923SClaudiu Manoil
2099c535e923SClaudiu Manoil if (fq->state != qman_fq_state_retired)
2100c535e923SClaudiu Manoil return -EINVAL;
2101c535e923SClaudiu Manoil #ifdef CONFIG_FSL_DPAA_CHECKING
2102c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
2103c535e923SClaudiu Manoil return -EINVAL;
2104c535e923SClaudiu Manoil #endif
2105c535e923SClaudiu Manoil p = get_affine_portal();
2106c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
2107c535e923SClaudiu Manoil fq->state != qman_fq_state_retired) {
2108c535e923SClaudiu Manoil ret = -EBUSY;
2109c535e923SClaudiu Manoil goto out;
2110c535e923SClaudiu Manoil }
2111c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2112d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fq->fqid);
2113c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2114c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2115c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2116c535e923SClaudiu Manoil goto out;
2117c535e923SClaudiu Manoil }
2118c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
2119c535e923SClaudiu Manoil if (mcr->result != QM_MCR_RESULT_OK) {
2120c535e923SClaudiu Manoil ret = -EIO;
2121c535e923SClaudiu Manoil goto out;
2122c535e923SClaudiu Manoil }
2123c535e923SClaudiu Manoil fq->state = qman_fq_state_oos;
2124c535e923SClaudiu Manoil out:
2125c535e923SClaudiu Manoil put_affine_portal();
2126c535e923SClaudiu Manoil return ret;
2127c535e923SClaudiu Manoil }
2128c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_oos_fq);
2129c535e923SClaudiu Manoil
2130c535e923SClaudiu Manoil int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
2131c535e923SClaudiu Manoil {
2132c535e923SClaudiu Manoil union qm_mc_command *mcc;
2133c535e923SClaudiu Manoil union qm_mc_result *mcr;
2134c535e923SClaudiu Manoil struct qman_portal *p = get_affine_portal();
2135c535e923SClaudiu Manoil int ret = 0;
2136c535e923SClaudiu Manoil
2137c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2138d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fq->fqid);
2139c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2140c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2141c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2142c535e923SClaudiu Manoil goto out;
2143c535e923SClaudiu Manoil }
2144c535e923SClaudiu Manoil
2145c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2146c535e923SClaudiu Manoil if (mcr->result == QM_MCR_RESULT_OK)
2147c535e923SClaudiu Manoil *fqd = mcr->queryfq.fqd;
2148c535e923SClaudiu Manoil else
2149c535e923SClaudiu Manoil ret = -EIO;
2150c535e923SClaudiu Manoil out:
2151c535e923SClaudiu Manoil put_affine_portal();
2152c535e923SClaudiu Manoil return ret;
2153c535e923SClaudiu Manoil }
2154c535e923SClaudiu Manoil
21558496272dSHoria Geantă int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
2156c535e923SClaudiu Manoil {
2157c535e923SClaudiu Manoil union qm_mc_command *mcc;
2158c535e923SClaudiu Manoil union qm_mc_result *mcr;
2159c535e923SClaudiu Manoil struct qman_portal *p = get_affine_portal();
2160c535e923SClaudiu Manoil int ret = 0;
2161c535e923SClaudiu Manoil
2162c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2163d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fq->fqid);
2164c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2165c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2166c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2167c535e923SClaudiu Manoil goto out;
2168c535e923SClaudiu Manoil }
2169c535e923SClaudiu Manoil
2170c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2171c535e923SClaudiu Manoil if (mcr->result == QM_MCR_RESULT_OK)
2172c535e923SClaudiu Manoil *np = mcr->queryfq_np;
2173c535e923SClaudiu Manoil else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2174c535e923SClaudiu Manoil ret = -ERANGE;
2175c535e923SClaudiu Manoil else
2176c535e923SClaudiu Manoil ret = -EIO;
2177c535e923SClaudiu Manoil out:
2178c535e923SClaudiu Manoil put_affine_portal();
2179c535e923SClaudiu Manoil return ret;
2180c535e923SClaudiu Manoil }
21818496272dSHoria Geantă EXPORT_SYMBOL(qman_query_fq_np);
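
/*
 * Illustrative sketch, not part of the driver: using the query above to tell
 * whether an FQ has reached the out-of-service state, mirroring the state
 * decoding done by qpool_cleanup()/cgr_cleanup() later in this file.
 * example_fq_is_oos() is a hypothetical helper name.
 */
static int __maybe_unused example_fq_is_oos(struct qman_fq *fq, bool *oos)
{
	struct qm_mcr_queryfq_np np;
	int err;

	err = qman_query_fq_np(fq, &np);
	if (err)
		return err; /* -ERANGE for a bad FQID, -EIO/-ETIMEDOUT otherwise */

	*oos = (np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS;
	return 0;
}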
2182c535e923SClaudiu Manoil
2183c535e923SClaudiu Manoil static int qman_query_cgr(struct qman_cgr *cgr,
2184c535e923SClaudiu Manoil struct qm_mcr_querycgr *cgrd)
2185c535e923SClaudiu Manoil {
2186c535e923SClaudiu Manoil union qm_mc_command *mcc;
2187c535e923SClaudiu Manoil union qm_mc_result *mcr;
2188c535e923SClaudiu Manoil struct qman_portal *p = get_affine_portal();
2189c535e923SClaudiu Manoil int ret = 0;
2190c535e923SClaudiu Manoil
2191c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
21927ff07da0SClaudiu Manoil mcc->cgr.cgid = cgr->cgrid;
2193c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2194c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2195c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2196c535e923SClaudiu Manoil goto out;
2197c535e923SClaudiu Manoil }
2198c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2199c535e923SClaudiu Manoil if (mcr->result == QM_MCR_RESULT_OK)
2200c535e923SClaudiu Manoil *cgrd = mcr->querycgr;
2201c535e923SClaudiu Manoil else {
2202c535e923SClaudiu Manoil dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
2203c535e923SClaudiu Manoil mcr_result_str(mcr->result));
2204c535e923SClaudiu Manoil ret = -EIO;
2205c535e923SClaudiu Manoil }
2206c535e923SClaudiu Manoil out:
2207c535e923SClaudiu Manoil put_affine_portal();
2208c535e923SClaudiu Manoil return ret;
2209c535e923SClaudiu Manoil }
2210c535e923SClaudiu Manoil
2211c535e923SClaudiu Manoil int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2212c535e923SClaudiu Manoil {
2213c535e923SClaudiu Manoil struct qm_mcr_querycgr query_cgr;
2214c535e923SClaudiu Manoil int err;
2215c535e923SClaudiu Manoil
2216c535e923SClaudiu Manoil err = qman_query_cgr(cgr, &query_cgr);
2217c535e923SClaudiu Manoil if (err)
2218c535e923SClaudiu Manoil return err;
2219c535e923SClaudiu Manoil
2220c535e923SClaudiu Manoil *result = !!query_cgr.cgr.cs;
2221c535e923SClaudiu Manoil return 0;
2222c535e923SClaudiu Manoil }
2223c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_query_cgr_congested);
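
/*
 * Illustrative sketch, not part of the driver: a transmit path might poll the
 * congestion state of its CGR and back off instead of enqueuing into a
 * congested group. example_cgr_allows_enqueue() is a hypothetical helper.
 */
static bool __maybe_unused example_cgr_allows_enqueue(struct qman_cgr *cgr)
{
	bool congested;

	if (qman_query_cgr_congested(cgr, &congested))
		return true; /* query failed; fail open and keep enqueuing */

	return !congested;
}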
2224c535e923SClaudiu Manoil
2225c535e923SClaudiu Manoil /* internal function used as a wait_event() expression */
2226c535e923SClaudiu Manoil static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
2227c535e923SClaudiu Manoil {
2228c535e923SClaudiu Manoil unsigned long irqflags;
2229c535e923SClaudiu Manoil int ret = -EBUSY;
2230c535e923SClaudiu Manoil
2231c535e923SClaudiu Manoil local_irq_save(irqflags);
2232c535e923SClaudiu Manoil if (p->vdqcr_owned)
2233c535e923SClaudiu Manoil goto out;
2234c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2235c535e923SClaudiu Manoil goto out;
2236c535e923SClaudiu Manoil
2237c535e923SClaudiu Manoil fq_set(fq, QMAN_FQ_STATE_VDQCR);
2238c535e923SClaudiu Manoil p->vdqcr_owned = fq;
2239c535e923SClaudiu Manoil qm_dqrr_vdqcr_set(&p->p, vdqcr);
2240c535e923SClaudiu Manoil ret = 0;
2241c535e923SClaudiu Manoil out:
2242c535e923SClaudiu Manoil local_irq_restore(irqflags);
2243c535e923SClaudiu Manoil return ret;
2244c535e923SClaudiu Manoil }
2245c535e923SClaudiu Manoil
2246c535e923SClaudiu Manoil static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2247c535e923SClaudiu Manoil {
2248c535e923SClaudiu Manoil int ret;
2249c535e923SClaudiu Manoil
2250c535e923SClaudiu Manoil *p = get_affine_portal();
2251c535e923SClaudiu Manoil ret = set_p_vdqcr(*p, fq, vdqcr);
2252c535e923SClaudiu Manoil put_affine_portal();
2253c535e923SClaudiu Manoil return ret;
2254c535e923SClaudiu Manoil }
2255c535e923SClaudiu Manoil
2256c535e923SClaudiu Manoil static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2257c535e923SClaudiu Manoil u32 vdqcr, u32 flags)
2258c535e923SClaudiu Manoil {
2259c535e923SClaudiu Manoil int ret = 0;
2260c535e923SClaudiu Manoil
2261c535e923SClaudiu Manoil if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2262c535e923SClaudiu Manoil ret = wait_event_interruptible(affine_queue,
2263c535e923SClaudiu Manoil !set_vdqcr(p, fq, vdqcr));
2264c535e923SClaudiu Manoil else
2265c535e923SClaudiu Manoil wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2266c535e923SClaudiu Manoil return ret;
2267c535e923SClaudiu Manoil }
2268c535e923SClaudiu Manoil
2269c535e923SClaudiu Manoil int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
2270c535e923SClaudiu Manoil {
2271c535e923SClaudiu Manoil struct qman_portal *p;
2272c535e923SClaudiu Manoil int ret;
2273c535e923SClaudiu Manoil
2274c535e923SClaudiu Manoil if (fq->state != qman_fq_state_parked &&
2275c535e923SClaudiu Manoil fq->state != qman_fq_state_retired)
2276c535e923SClaudiu Manoil return -EINVAL;
2277c535e923SClaudiu Manoil if (vdqcr & QM_VDQCR_FQID_MASK)
2278c535e923SClaudiu Manoil return -EINVAL;
2279c535e923SClaudiu Manoil if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2280c535e923SClaudiu Manoil return -EBUSY;
2281c535e923SClaudiu Manoil vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2282c535e923SClaudiu Manoil if (flags & QMAN_VOLATILE_FLAG_WAIT)
2283c535e923SClaudiu Manoil ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
2284c535e923SClaudiu Manoil else
2285c535e923SClaudiu Manoil ret = set_vdqcr(&p, fq, vdqcr);
2286c535e923SClaudiu Manoil if (ret)
2287c535e923SClaudiu Manoil return ret;
2288c535e923SClaudiu Manoil /* VDQCR is set */
2289c535e923SClaudiu Manoil if (flags & QMAN_VOLATILE_FLAG_FINISH) {
2290c535e923SClaudiu Manoil if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2291c535e923SClaudiu Manoil /*
2292c535e923SClaudiu Manoil * NB: don't propagate any error - the caller wouldn't
2293c535e923SClaudiu Manoil * know whether the VDQCR was issued or not. A signal
2294c535e923SClaudiu Manoil * could arrive after returning anyway, so the caller
2295c535e923SClaudiu Manoil * can check signal_pending() if that's an issue.
2296c535e923SClaudiu Manoil */
2297c535e923SClaudiu Manoil wait_event_interruptible(affine_queue,
2298c535e923SClaudiu Manoil !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2299c535e923SClaudiu Manoil else
2300c535e923SClaudiu Manoil wait_event(affine_queue,
2301c535e923SClaudiu Manoil !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2302c535e923SClaudiu Manoil }
2303c535e923SClaudiu Manoil return 0;
2304c535e923SClaudiu Manoil }
2305c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_volatile_dequeue);
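
/*
 * Illustrative sketch, not part of the driver: issuing a single blocking
 * volatile dequeue against a parked or retired FQ. The dequeued frames are
 * still delivered through fq->cb.dqrr on the issuing portal; WAIT blocks
 * until the VDQCR is accepted and FINISH until it has completed.
 * example_drain_fq() is a hypothetical helper; a caller draining a deep FQ
 * would loop, much as qman_shutdown_fq() below does with its own drain logic.
 */
static int __maybe_unused example_drain_fq(struct qman_fq *fq)
{
	/* Dequeue up to 3 frames with one command */
	return qman_volatile_dequeue(fq,
				     QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH,
				     QM_VDQCR_NUMFRAMES_SET(3));
}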
2306c535e923SClaudiu Manoil
2307c535e923SClaudiu Manoil static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2308c535e923SClaudiu Manoil {
2309c535e923SClaudiu Manoil if (avail)
2310c535e923SClaudiu Manoil qm_eqcr_cce_prefetch(&p->p);
2311c535e923SClaudiu Manoil else
2312c535e923SClaudiu Manoil qm_eqcr_cce_update(&p->p);
2313c535e923SClaudiu Manoil }
2314c535e923SClaudiu Manoil
2315c535e923SClaudiu Manoil int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
2316c535e923SClaudiu Manoil {
2317c535e923SClaudiu Manoil struct qman_portal *p;
2318c535e923SClaudiu Manoil struct qm_eqcr_entry *eq;
2319c535e923SClaudiu Manoil unsigned long irqflags;
2320c535e923SClaudiu Manoil u8 avail;
2321c535e923SClaudiu Manoil
2322c535e923SClaudiu Manoil p = get_affine_portal();
2323c535e923SClaudiu Manoil local_irq_save(irqflags);
2324c535e923SClaudiu Manoil
2325c535e923SClaudiu Manoil if (p->use_eqcr_ci_stashing) {
2326c535e923SClaudiu Manoil /*
2327c535e923SClaudiu Manoil * The stashing case is easy, only update if we need to in
2328c535e923SClaudiu Manoil * order to try and liberate ring entries.
2329c535e923SClaudiu Manoil */
2330c535e923SClaudiu Manoil eq = qm_eqcr_start_stash(&p->p);
2331c535e923SClaudiu Manoil } else {
2332c535e923SClaudiu Manoil /*
2333c535e923SClaudiu Manoil * The non-stashing case is harder, need to prefetch ahead of
2334c535e923SClaudiu Manoil * time.
2335c535e923SClaudiu Manoil */
2336c535e923SClaudiu Manoil avail = qm_eqcr_get_avail(&p->p);
2337c535e923SClaudiu Manoil if (avail < 2)
2338c535e923SClaudiu Manoil update_eqcr_ci(p, avail);
2339c535e923SClaudiu Manoil eq = qm_eqcr_start_no_stash(&p->p);
2340c535e923SClaudiu Manoil }
2341c535e923SClaudiu Manoil
2342c535e923SClaudiu Manoil if (unlikely(!eq))
2343c535e923SClaudiu Manoil goto out;
2344c535e923SClaudiu Manoil
2345d6753c7eSClaudiu Manoil qm_fqid_set(eq, fq->fqid);
234618058822SClaudiu Manoil eq->tag = cpu_to_be32(fq_to_tag(fq));
2347c535e923SClaudiu Manoil eq->fd = *fd;
2348c535e923SClaudiu Manoil
2349c535e923SClaudiu Manoil qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
2350c535e923SClaudiu Manoil out:
2351c535e923SClaudiu Manoil local_irq_restore(irqflags);
2352c535e923SClaudiu Manoil put_affine_portal();
2353c535e923SClaudiu Manoil return 0;
2354c535e923SClaudiu Manoil }
2355c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_enqueue);
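
/*
 * Illustrative sketch, not part of the driver: building a simple contiguous
 * frame descriptor and handing it to the enqueue path above. It assumes the
 * qm_fd_addr_set64()/qm_fd_set_contig() helpers and the bpid field exposed by
 * <soc/fsl/qman.h>; example_enqueue_buf() and its arguments are hypothetical,
 * and dma_addr must already be mapped for QMan's use.
 */
static int __maybe_unused example_enqueue_buf(struct qman_fq *fq,
					      dma_addr_t dma_addr,
					      u32 offset, u32 len, u8 bpid)
{
	struct qm_fd fd;

	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, dma_addr);	/* 40-bit buffer address */
	qm_fd_set_contig(&fd, offset, len);	/* single contiguous buffer */
	fd.bpid = bpid;				/* buffer pool to free into */

	return qman_enqueue(fq, &fd);
}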
2356c535e923SClaudiu Manoil
2357c535e923SClaudiu Manoil static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
2358c535e923SClaudiu Manoil struct qm_mcc_initcgr *opts)
2359c535e923SClaudiu Manoil {
2360c535e923SClaudiu Manoil union qm_mc_command *mcc;
2361c535e923SClaudiu Manoil union qm_mc_result *mcr;
2362c535e923SClaudiu Manoil struct qman_portal *p = get_affine_portal();
2363c535e923SClaudiu Manoil u8 verb = QM_MCC_VERB_MODIFYCGR;
2364c535e923SClaudiu Manoil int ret = 0;
2365c535e923SClaudiu Manoil
2366c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2367c535e923SClaudiu Manoil if (opts)
2368c535e923SClaudiu Manoil mcc->initcgr = *opts;
2369c535e923SClaudiu Manoil mcc->initcgr.cgid = cgr->cgrid;
2370c535e923SClaudiu Manoil if (flags & QMAN_CGR_FLAG_USE_INIT)
2371c535e923SClaudiu Manoil verb = QM_MCC_VERB_INITCGR;
2372c535e923SClaudiu Manoil qm_mc_commit(&p->p, verb);
2373c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2374c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2375c535e923SClaudiu Manoil goto out;
2376c535e923SClaudiu Manoil }
2377c535e923SClaudiu Manoil
2378c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2379c535e923SClaudiu Manoil if (mcr->result != QM_MCR_RESULT_OK)
2380c535e923SClaudiu Manoil ret = -EIO;
2381c535e923SClaudiu Manoil
2382c535e923SClaudiu Manoil out:
2383c535e923SClaudiu Manoil put_affine_portal();
2384c535e923SClaudiu Manoil return ret;
2385c535e923SClaudiu Manoil }
2386c535e923SClaudiu Manoil
2387c535e923SClaudiu Manoil #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2388496bfa11SClaudiu Manoil
2389496bfa11SClaudiu Manoil /* congestion state change notification target update control */
2390496bfa11SClaudiu Manoil static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
2391496bfa11SClaudiu Manoil {
2392496bfa11SClaudiu Manoil if (qman_ip_rev >= QMAN_REV30)
239318058822SClaudiu Manoil cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
239418058822SClaudiu Manoil QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
2395496bfa11SClaudiu Manoil else
239618058822SClaudiu Manoil cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
2397496bfa11SClaudiu Manoil }
2398496bfa11SClaudiu Manoil
2399496bfa11SClaudiu Manoil static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
2400496bfa11SClaudiu Manoil {
2401496bfa11SClaudiu Manoil if (qman_ip_rev >= QMAN_REV30)
240218058822SClaudiu Manoil cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
2403496bfa11SClaudiu Manoil else
240418058822SClaudiu Manoil cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
2405496bfa11SClaudiu Manoil }
2406c535e923SClaudiu Manoil
2407c535e923SClaudiu Manoil static u8 qman_cgr_cpus[CGR_NUM];
2408c535e923SClaudiu Manoil
2409c535e923SClaudiu Manoil void qman_init_cgr_all(void)
2410c535e923SClaudiu Manoil {
2411c535e923SClaudiu Manoil struct qman_cgr cgr;
2412c535e923SClaudiu Manoil int err_cnt = 0;
2413c535e923SClaudiu Manoil
2414c535e923SClaudiu Manoil for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
2415c535e923SClaudiu Manoil if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
2416c535e923SClaudiu Manoil err_cnt++;
2417c535e923SClaudiu Manoil }
2418c535e923SClaudiu Manoil
2419c535e923SClaudiu Manoil if (err_cnt)
2420c535e923SClaudiu Manoil pr_err("Warning: %d error%s while initialising CGR h/w\n",
2421c535e923SClaudiu Manoil err_cnt, (err_cnt > 1) ? "s" : "");
2422c535e923SClaudiu Manoil }
2423c535e923SClaudiu Manoil
2424c535e923SClaudiu Manoil int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2425c535e923SClaudiu Manoil struct qm_mcc_initcgr *opts)
2426c535e923SClaudiu Manoil {
2427c535e923SClaudiu Manoil struct qm_mcr_querycgr cgr_state;
2428c535e923SClaudiu Manoil int ret;
2429c535e923SClaudiu Manoil struct qman_portal *p;
2430c535e923SClaudiu Manoil
2431c535e923SClaudiu Manoil /*
2432c535e923SClaudiu Manoil * We have to check that the provided CGRID is within the limits of the
2433c535e923SClaudiu Manoil * data-structures, for obvious reasons. However we'll let h/w take
2434c535e923SClaudiu Manoil * care of determining whether it's within the limits of what exists on
2435c535e923SClaudiu Manoil * the SoC.
2436c535e923SClaudiu Manoil */
2437c535e923SClaudiu Manoil if (cgr->cgrid >= CGR_NUM)
2438c535e923SClaudiu Manoil return -EINVAL;
2439c535e923SClaudiu Manoil
2440c535e923SClaudiu Manoil preempt_disable();
2441c535e923SClaudiu Manoil p = get_affine_portal();
2442c535e923SClaudiu Manoil qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
2443c535e923SClaudiu Manoil preempt_enable();
2444c535e923SClaudiu Manoil
2445c535e923SClaudiu Manoil cgr->chan = p->config->channel;
2446*54d26adfSSean Anderson raw_spin_lock_irq(&p->cgr_lock);
2447c535e923SClaudiu Manoil
2448c535e923SClaudiu Manoil if (opts) {
2449e5caf693SClaudiu Manoil struct qm_mcc_initcgr local_opts = *opts;
2450e5caf693SClaudiu Manoil
2451c535e923SClaudiu Manoil ret = qman_query_cgr(cgr, &cgr_state);
2452c535e923SClaudiu Manoil if (ret)
2453c535e923SClaudiu Manoil goto out;
2454e5caf693SClaudiu Manoil
2455496bfa11SClaudiu Manoil qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
245618058822SClaudiu Manoil be32_to_cpu(cgr_state.cgr.cscn_targ));
245718058822SClaudiu Manoil local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2458c535e923SClaudiu Manoil
2459c535e923SClaudiu Manoil /* send init if flags indicate so */
2460e5caf693SClaudiu Manoil if (flags & QMAN_CGR_FLAG_USE_INIT)
2461c535e923SClaudiu Manoil ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2462c535e923SClaudiu Manoil &local_opts);
2463c535e923SClaudiu Manoil else
2464c535e923SClaudiu Manoil ret = qm_modify_cgr(cgr, 0, &local_opts);
2465c535e923SClaudiu Manoil if (ret)
2466c535e923SClaudiu Manoil goto out;
2467c535e923SClaudiu Manoil }
2468c535e923SClaudiu Manoil
2469c535e923SClaudiu Manoil list_add(&cgr->node, &p->cgr_cbs);
2470c535e923SClaudiu Manoil
2471c535e923SClaudiu Manoil /* Determine if newly added object requires its callback to be called */
2472c535e923SClaudiu Manoil ret = qman_query_cgr(cgr, &cgr_state);
2473c535e923SClaudiu Manoil if (ret) {
2474c535e923SClaudiu Manoil /* we can't go back, so proceed and return success */
2475c535e923SClaudiu Manoil dev_err(p->config->dev, "CGR HW state partially modified\n");
2476c535e923SClaudiu Manoil ret = 0;
2477c535e923SClaudiu Manoil goto out;
2478c535e923SClaudiu Manoil }
2479c535e923SClaudiu Manoil if (cgr->cb && cgr_state.cgr.cscn_en &&
2480c535e923SClaudiu Manoil qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
2481c535e923SClaudiu Manoil cgr->cb(p, cgr, 1);
2482c535e923SClaudiu Manoil out:
2483*54d26adfSSean Anderson raw_spin_unlock_irq(&p->cgr_lock);
2484c535e923SClaudiu Manoil put_affine_portal();
2485c535e923SClaudiu Manoil return ret;
2486c535e923SClaudiu Manoil }
2487c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_create_cgr);
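
/*
 * Illustrative sketch, not part of the driver: allocating a CGR ID and
 * initialising the congestion group with state-change notifications enabled
 * and a byte-count threshold. It assumes the QM_CGR_WE_*, QM_CGR_EN and
 * qm_cgr_cs_thres_set64() definitions from <soc/fsl/qman.h>;
 * example_setup_cgr(), its callback and the 1MB threshold are all
 * hypothetical.
 */
static void example_cscn_cb(struct qman_portal *qm, struct qman_cgr *cgr,
			    int congested)
{
	pr_info("CGR %u %s congestion\n", cgr->cgrid,
		congested ? "entered" : "exited");
}

static int __maybe_unused example_setup_cgr(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr opts;
	int err;

	err = qman_alloc_cgrid_range(&cgr->cgrid, 1);
	if (err)
		return err;

	cgr->cb = example_cscn_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	opts.cgr.cscn_en = QM_CGR_EN;
	/* Assert congestion once roughly 1MB of frame data is held */
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x100000, 1);

	err = qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (err)
		qman_release_cgrid(cgr->cgrid);
	return err;
}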
2488c535e923SClaudiu Manoil
2489d0e17a46SSean Anderson static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
2490c535e923SClaudiu Manoil {
2491c535e923SClaudiu Manoil struct qman_portal *p = get_affine_portal();
2492c535e923SClaudiu Manoil
2493c535e923SClaudiu Manoil if (cgr->chan != p->config->channel) {
2494c535e923SClaudiu Manoil /* attempt to delete from other portal than creator */
2495c535e923SClaudiu Manoil dev_err(p->config->dev, "CGR not owned by current portal");
2496c535e923SClaudiu Manoil dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
2497c535e923SClaudiu Manoil cgr->chan, p->config->channel);
2498d0e17a46SSean Anderson put_affine_portal();
2499d0e17a46SSean Anderson return NULL;
2500c535e923SClaudiu Manoil }
2501d0e17a46SSean Anderson
2502d0e17a46SSean Anderson return p;
2503d0e17a46SSean Anderson }
2504d0e17a46SSean Anderson
2505d0e17a46SSean Anderson int qman_delete_cgr(struct qman_cgr *cgr)
2506d0e17a46SSean Anderson {
2507d0e17a46SSean Anderson unsigned long irqflags;
2508d0e17a46SSean Anderson struct qm_mcr_querycgr cgr_state;
2509d0e17a46SSean Anderson struct qm_mcc_initcgr local_opts;
2510d0e17a46SSean Anderson int ret = 0;
2511d0e17a46SSean Anderson struct qman_cgr *i;
2512d0e17a46SSean Anderson struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
2513d0e17a46SSean Anderson
2514d0e17a46SSean Anderson if (!p)
2515d0e17a46SSean Anderson return -EINVAL;
2516d0e17a46SSean Anderson
2517c535e923SClaudiu Manoil memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2518*54d26adfSSean Anderson raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
2519c535e923SClaudiu Manoil list_del(&cgr->node);
2520c535e923SClaudiu Manoil /*
2521c535e923SClaudiu Manoil * If there are no other CGR objects for this CGRID in the list,
2522c535e923SClaudiu Manoil * update CSCN_TARG accordingly
2523c535e923SClaudiu Manoil */
2524c535e923SClaudiu Manoil list_for_each_entry(i, &p->cgr_cbs, node)
2525c535e923SClaudiu Manoil if (i->cgrid == cgr->cgrid && i->cb)
2526c535e923SClaudiu Manoil goto release_lock;
2527c535e923SClaudiu Manoil ret = qman_query_cgr(cgr, &cgr_state);
2528c535e923SClaudiu Manoil if (ret) {
2529c535e923SClaudiu Manoil /* add back to the list */
2530c535e923SClaudiu Manoil list_add(&cgr->node, &p->cgr_cbs);
2531c535e923SClaudiu Manoil goto release_lock;
2532c535e923SClaudiu Manoil }
2533496bfa11SClaudiu Manoil
253418058822SClaudiu Manoil local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2535496bfa11SClaudiu Manoil qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
253618058822SClaudiu Manoil be32_to_cpu(cgr_state.cgr.cscn_targ));
2537496bfa11SClaudiu Manoil
2538c535e923SClaudiu Manoil ret = qm_modify_cgr(cgr, 0, &local_opts);
2539c535e923SClaudiu Manoil if (ret)
2540c535e923SClaudiu Manoil /* add back to the list */
2541c535e923SClaudiu Manoil list_add(&cgr->node, &p->cgr_cbs);
2542c535e923SClaudiu Manoil release_lock:
2543*54d26adfSSean Anderson raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
2544c535e923SClaudiu Manoil put_affine_portal();
2545c535e923SClaudiu Manoil return ret;
2546c535e923SClaudiu Manoil }
2547c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_delete_cgr);
2548c535e923SClaudiu Manoil
2549c535e923SClaudiu Manoil struct cgr_comp {
2550c535e923SClaudiu Manoil struct qman_cgr *cgr;
2551c535e923SClaudiu Manoil struct completion completion;
2552c535e923SClaudiu Manoil };
2553c535e923SClaudiu Manoil
255496f413f4SMadalin Bucur static void qman_delete_cgr_smp_call(void *p)
2555c535e923SClaudiu Manoil {
255696f413f4SMadalin Bucur qman_delete_cgr((struct qman_cgr *)p);
2557c535e923SClaudiu Manoil }
2558c535e923SClaudiu Manoil
2559c535e923SClaudiu Manoil void qman_delete_cgr_safe(struct qman_cgr *cgr)
2560c535e923SClaudiu Manoil {
2561c535e923SClaudiu Manoil preempt_disable();
2562c535e923SClaudiu Manoil if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
256396f413f4SMadalin Bucur smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
256496f413f4SMadalin Bucur qman_delete_cgr_smp_call, cgr, true);
2565c535e923SClaudiu Manoil preempt_enable();
2566c535e923SClaudiu Manoil return;
2567c535e923SClaudiu Manoil }
256896f413f4SMadalin Bucur
2569c535e923SClaudiu Manoil qman_delete_cgr(cgr);
2570c535e923SClaudiu Manoil preempt_enable();
2571c535e923SClaudiu Manoil }
2572c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_delete_cgr_safe);
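
/*
 * Illustrative sketch, not part of the driver: tearing down a congestion
 * group set up by the hypothetical example_setup_cgr() above. The _safe
 * variant re-routes the delete to the CPU whose portal created the CGR, so
 * it can be invoked from any core; the CGR ID is then returned to the
 * allocator.
 */
static void __maybe_unused example_teardown_cgr(struct qman_cgr *cgr)
{
	qman_delete_cgr_safe(cgr);
	qman_release_cgrid(cgr->cgrid);
}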
2573c535e923SClaudiu Manoil
2574914f8b22SSean Anderson static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
2575914f8b22SSean Anderson {
2576914f8b22SSean Anderson int ret;
2577914f8b22SSean Anderson unsigned long irqflags;
2578914f8b22SSean Anderson struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
2579914f8b22SSean Anderson
2580914f8b22SSean Anderson if (!p)
2581914f8b22SSean Anderson return -EINVAL;
2582914f8b22SSean Anderson
2583*54d26adfSSean Anderson raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
2584914f8b22SSean Anderson ret = qm_modify_cgr(cgr, 0, opts);
2585*54d26adfSSean Anderson raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
2586914f8b22SSean Anderson put_affine_portal();
2587914f8b22SSean Anderson return ret;
2588914f8b22SSean Anderson }
2589914f8b22SSean Anderson
2590914f8b22SSean Anderson struct update_cgr_params {
2591914f8b22SSean Anderson struct qman_cgr *cgr;
2592914f8b22SSean Anderson struct qm_mcc_initcgr *opts;
2593914f8b22SSean Anderson int ret;
2594914f8b22SSean Anderson };
2595914f8b22SSean Anderson
2596914f8b22SSean Anderson static void qman_update_cgr_smp_call(void *p)
2597914f8b22SSean Anderson {
2598914f8b22SSean Anderson struct update_cgr_params *params = p;
2599914f8b22SSean Anderson
2600914f8b22SSean Anderson params->ret = qman_update_cgr(params->cgr, params->opts);
2601914f8b22SSean Anderson }
2602914f8b22SSean Anderson
2603914f8b22SSean Anderson int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
2604914f8b22SSean Anderson {
2605914f8b22SSean Anderson struct update_cgr_params params = {
2606914f8b22SSean Anderson .cgr = cgr,
2607914f8b22SSean Anderson .opts = opts,
2608914f8b22SSean Anderson };
2609914f8b22SSean Anderson
2610914f8b22SSean Anderson preempt_disable();
2611914f8b22SSean Anderson if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
2612914f8b22SSean Anderson smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
2613914f8b22SSean Anderson qman_update_cgr_smp_call, ¶ms,
2614914f8b22SSean Anderson true);
2615914f8b22SSean Anderson else
2616914f8b22SSean Anderson params.ret = qman_update_cgr(cgr, opts);
2617914f8b22SSean Anderson preempt_enable();
2618914f8b22SSean Anderson return params.ret;
2619914f8b22SSean Anderson }
2620914f8b22SSean Anderson EXPORT_SYMBOL(qman_update_cgr_safe);
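
/*
 * Illustrative sketch, not part of the driver: adjusting the congestion
 * threshold of an existing group at run time. Only the fields selected by
 * we_mask are modified. As above, QM_CGR_WE_CS_THRES and
 * qm_cgr_cs_thres_set64() are assumed from <soc/fsl/qman.h>, and
 * example_set_cgr_threshold() is a hypothetical helper.
 */
static int __maybe_unused example_set_cgr_threshold(struct qman_cgr *cgr,
						    u64 bytes)
{
	struct qm_mcc_initcgr opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, bytes, 1);

	return qman_update_cgr_safe(cgr, &opts);
}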
2621914f8b22SSean Anderson
2622c535e923SClaudiu Manoil /* Cleanup FQs */
2623c535e923SClaudiu Manoil
2624c535e923SClaudiu Manoil static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
2625c535e923SClaudiu Manoil {
2626c535e923SClaudiu Manoil const union qm_mr_entry *msg;
2627c535e923SClaudiu Manoil int found = 0;
2628c535e923SClaudiu Manoil
2629c535e923SClaudiu Manoil qm_mr_pvb_update(p);
2630c535e923SClaudiu Manoil msg = qm_mr_current(p);
2631c535e923SClaudiu Manoil while (msg) {
2632c535e923SClaudiu Manoil if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
2633c535e923SClaudiu Manoil found = 1;
2634c535e923SClaudiu Manoil qm_mr_next(p);
2635c535e923SClaudiu Manoil qm_mr_cci_consume_to_current(p);
2636c535e923SClaudiu Manoil qm_mr_pvb_update(p);
2637c535e923SClaudiu Manoil msg = qm_mr_current(p);
2638c535e923SClaudiu Manoil }
2639c535e923SClaudiu Manoil return found;
2640c535e923SClaudiu Manoil }
2641c535e923SClaudiu Manoil
2642c535e923SClaudiu Manoil static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
2643c535e923SClaudiu Manoil bool wait)
2644c535e923SClaudiu Manoil {
2645c535e923SClaudiu Manoil const struct qm_dqrr_entry *dqrr;
2646c535e923SClaudiu Manoil int found = 0;
2647c535e923SClaudiu Manoil
2648c535e923SClaudiu Manoil do {
2649c535e923SClaudiu Manoil qm_dqrr_pvb_update(p);
2650c535e923SClaudiu Manoil dqrr = qm_dqrr_current(p);
2651c535e923SClaudiu Manoil if (!dqrr)
2652c535e923SClaudiu Manoil cpu_relax();
2653c535e923SClaudiu Manoil } while (wait && !dqrr);
2654c535e923SClaudiu Manoil
2655c535e923SClaudiu Manoil while (dqrr) {
2656d6753c7eSClaudiu Manoil if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
2657c535e923SClaudiu Manoil found = 1;
2658c535e923SClaudiu Manoil qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
2659c535e923SClaudiu Manoil qm_dqrr_pvb_update(p);
2660c535e923SClaudiu Manoil qm_dqrr_next(p);
2661c535e923SClaudiu Manoil dqrr = qm_dqrr_current(p);
2662c535e923SClaudiu Manoil }
2663c535e923SClaudiu Manoil return found;
2664c535e923SClaudiu Manoil }
2665c535e923SClaudiu Manoil
2666c535e923SClaudiu Manoil #define qm_mr_drain(p, V) \
2667c535e923SClaudiu Manoil _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
2668c535e923SClaudiu Manoil
2669c535e923SClaudiu Manoil #define qm_dqrr_drain(p, f, S) \
2670c535e923SClaudiu Manoil _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
2671c535e923SClaudiu Manoil
2672c535e923SClaudiu Manoil #define qm_dqrr_drain_wait(p, f, S) \
2673c535e923SClaudiu Manoil _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
2674c535e923SClaudiu Manoil
2675c535e923SClaudiu Manoil #define qm_dqrr_drain_nomatch(p) \
2676c535e923SClaudiu Manoil _qm_dqrr_consume_and_match(p, 0, 0, false)
2677c535e923SClaudiu Manoil
2678c5501aa9SRoy Pledge int qman_shutdown_fq(u32 fqid)
2679c535e923SClaudiu Manoil {
2680e844168aSRoy Pledge struct qman_portal *p, *channel_portal;
2681c535e923SClaudiu Manoil struct device *dev;
2682c535e923SClaudiu Manoil union qm_mc_command *mcc;
2683c535e923SClaudiu Manoil union qm_mc_result *mcr;
2684c535e923SClaudiu Manoil int orl_empty, drain = 0, ret = 0;
26853d1d8f29SLee Jones u32 channel, res;
2686c535e923SClaudiu Manoil u8 state;
2687c535e923SClaudiu Manoil
2688c535e923SClaudiu Manoil p = get_affine_portal();
2689c535e923SClaudiu Manoil dev = p->config->dev;
2690c535e923SClaudiu Manoil /* Determine the state of the FQID */
2691c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2692d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fqid);
2693c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2694c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2695c535e923SClaudiu Manoil dev_err(dev, "QUERYFQ_NP timeout\n");
2696c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2697c535e923SClaudiu Manoil goto out;
2698c535e923SClaudiu Manoil }
2699c535e923SClaudiu Manoil
2700c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2701c535e923SClaudiu Manoil state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2702c535e923SClaudiu Manoil if (state == QM_MCR_NP_STATE_OOS)
2703c535e923SClaudiu Manoil goto out; /* Already OOS, no need to do anymore checks */
2704c535e923SClaudiu Manoil
2705c535e923SClaudiu Manoil /* Query which channel the FQ is using */
2706c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2707d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fqid);
2708c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2709c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2710c535e923SClaudiu Manoil dev_err(dev, "QUERYFQ timeout\n");
2711c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2712c535e923SClaudiu Manoil goto out;
2713c535e923SClaudiu Manoil }
2714c535e923SClaudiu Manoil
2715c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2716c535e923SClaudiu Manoil /* Need to store these since the MCR gets reused */
2717c535e923SClaudiu Manoil channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
27183d1d8f29SLee Jones qm_fqd_get_wq(&mcr->queryfq.fqd);
2719c535e923SClaudiu Manoil
2720e844168aSRoy Pledge if (channel < qm_channel_pool1) {
2721e844168aSRoy Pledge channel_portal = get_portal_for_channel(channel);
2722e844168aSRoy Pledge if (channel_portal == NULL) {
2723e844168aSRoy Pledge dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
2724e844168aSRoy Pledge channel);
2725e844168aSRoy Pledge ret = -EIO;
2726e844168aSRoy Pledge goto out;
2727e844168aSRoy Pledge }
2728e844168aSRoy Pledge } else
2729e844168aSRoy Pledge channel_portal = p;
2730e844168aSRoy Pledge
2731c535e923SClaudiu Manoil switch (state) {
2732c535e923SClaudiu Manoil case QM_MCR_NP_STATE_TEN_SCHED:
2733c535e923SClaudiu Manoil case QM_MCR_NP_STATE_TRU_SCHED:
2734c535e923SClaudiu Manoil case QM_MCR_NP_STATE_ACTIVE:
2735c535e923SClaudiu Manoil case QM_MCR_NP_STATE_PARKED:
2736c535e923SClaudiu Manoil orl_empty = 0;
2737e844168aSRoy Pledge mcc = qm_mc_start(&channel_portal->p);
2738d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fqid);
2739e844168aSRoy Pledge qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
2740e844168aSRoy Pledge if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
2741e844168aSRoy Pledge dev_err(dev, "ALTER_RETIRE timeout\n");
2742c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2743c535e923SClaudiu Manoil goto out;
2744c535e923SClaudiu Manoil }
2745c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2746c535e923SClaudiu Manoil QM_MCR_VERB_ALTER_RETIRE);
2747c535e923SClaudiu Manoil res = mcr->result; /* Make a copy as we reuse MCR below */
2748c535e923SClaudiu Manoil
2749e844168aSRoy Pledge if (res == QM_MCR_RESULT_OK)
2750e844168aSRoy Pledge drain_mr_fqrni(&channel_portal->p);
2751e844168aSRoy Pledge
2752c535e923SClaudiu Manoil if (res == QM_MCR_RESULT_PENDING) {
2753c535e923SClaudiu Manoil /*
2754c535e923SClaudiu Manoil * Need to wait for the FQRN in the message ring, which
2755c535e923SClaudiu Manoil * will only occur once the FQ has been drained. In
2756c535e923SClaudiu Manoil * order for the FQ to drain the portal needs to be set
2757c535e923SClaudiu Manoil * to dequeue from the channel the FQ is scheduled on
2758c535e923SClaudiu Manoil */
2759c535e923SClaudiu Manoil int found_fqrn = 0;
2760c535e923SClaudiu Manoil
2761c535e923SClaudiu Manoil /* Flag that we need to drain FQ */
2762c535e923SClaudiu Manoil drain = 1;
2763c535e923SClaudiu Manoil
2764c535e923SClaudiu Manoil if (channel >= qm_channel_pool1 &&
2765c535e923SClaudiu Manoil channel < qm_channel_pool1 + 15) {
2766c535e923SClaudiu Manoil /* Pool channel, enable the bit in the portal */
2767c535e923SClaudiu Manoil } else if (channel < qm_channel_pool1) {
2768c535e923SClaudiu Manoil /* Dedicated channel */
2769c535e923SClaudiu Manoil } else {
2770c535e923SClaudiu Manoil dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
2771c535e923SClaudiu Manoil fqid, channel);
2772c535e923SClaudiu Manoil ret = -EBUSY;
2773c535e923SClaudiu Manoil goto out;
2774c535e923SClaudiu Manoil }
2775c535e923SClaudiu Manoil /* Set the sdqcr to drain this channel */
2776c535e923SClaudiu Manoil if (channel < qm_channel_pool1)
2777e844168aSRoy Pledge qm_dqrr_sdqcr_set(&channel_portal->p,
2778c535e923SClaudiu Manoil QM_SDQCR_TYPE_ACTIVE |
2779c535e923SClaudiu Manoil QM_SDQCR_CHANNELS_DEDICATED);
2780c535e923SClaudiu Manoil else
2781e844168aSRoy Pledge qm_dqrr_sdqcr_set(&channel_portal->p,
2782c535e923SClaudiu Manoil QM_SDQCR_TYPE_ACTIVE |
2783c535e923SClaudiu Manoil QM_SDQCR_CHANNELS_POOL_CONV
2784c535e923SClaudiu Manoil (channel));
2785c535e923SClaudiu Manoil do {
2786c535e923SClaudiu Manoil /* Keep draining DQRR while checking the MR */
2787e844168aSRoy Pledge qm_dqrr_drain_nomatch(&channel_portal->p);
2788c535e923SClaudiu Manoil /* Process message ring too */
2789e844168aSRoy Pledge found_fqrn = qm_mr_drain(&channel_portal->p,
2790e844168aSRoy Pledge FQRN);
2791c535e923SClaudiu Manoil cpu_relax();
2792c535e923SClaudiu Manoil } while (!found_fqrn);
2793e844168aSRoy Pledge /* Restore SDQCR */
2794e844168aSRoy Pledge qm_dqrr_sdqcr_set(&channel_portal->p,
2795e844168aSRoy Pledge channel_portal->sdqcr);
2796c535e923SClaudiu Manoil
2797c535e923SClaudiu Manoil }
2798c535e923SClaudiu Manoil if (res != QM_MCR_RESULT_OK &&
2799c535e923SClaudiu Manoil res != QM_MCR_RESULT_PENDING) {
2800c535e923SClaudiu Manoil dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
2801c535e923SClaudiu Manoil fqid, res);
2802c535e923SClaudiu Manoil ret = -EIO;
2803c535e923SClaudiu Manoil goto out;
2804c535e923SClaudiu Manoil }
2805c535e923SClaudiu Manoil if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2806c535e923SClaudiu Manoil /*
2807c535e923SClaudiu Manoil * ORL had no entries, no need to wait until the
2808c535e923SClaudiu Manoil * ERNs come in
2809c535e923SClaudiu Manoil */
2810c535e923SClaudiu Manoil orl_empty = 1;
2811c535e923SClaudiu Manoil }
2812c535e923SClaudiu Manoil /*
2813c535e923SClaudiu Manoil * Retirement succeeded, check to see if FQ needs
2814c535e923SClaudiu Manoil * to be drained
2815c535e923SClaudiu Manoil */
2816c535e923SClaudiu Manoil if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2817c535e923SClaudiu Manoil /* FQ is Not Empty, drain using volatile DQ commands */
2818c535e923SClaudiu Manoil do {
2819c535e923SClaudiu Manoil u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2820c535e923SClaudiu Manoil
2821c535e923SClaudiu Manoil qm_dqrr_vdqcr_set(&p->p, vdqcr);
2822c535e923SClaudiu Manoil /*
2823c535e923SClaudiu Manoil * Wait for a dequeue and process the dequeues,
2824c535e923SClaudiu Manoil * making sure to empty the ring completely
2825c535e923SClaudiu Manoil */
2826e844168aSRoy Pledge } while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
2827c535e923SClaudiu Manoil }
2828c535e923SClaudiu Manoil
2829c535e923SClaudiu Manoil while (!orl_empty) {
2830c535e923SClaudiu Manoil /* Wait for the ORL to have been completely drained */
2831c535e923SClaudiu Manoil orl_empty = qm_mr_drain(&p->p, FQRL);
2832c535e923SClaudiu Manoil cpu_relax();
2833c535e923SClaudiu Manoil }
2834c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2835d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fqid);
2836c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2837c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2838c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2839c535e923SClaudiu Manoil goto out;
2840c535e923SClaudiu Manoil }
2841c535e923SClaudiu Manoil
2842c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2843c535e923SClaudiu Manoil QM_MCR_VERB_ALTER_OOS);
2844c535e923SClaudiu Manoil if (mcr->result != QM_MCR_RESULT_OK) {
2845c535e923SClaudiu Manoil dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
2846c535e923SClaudiu Manoil fqid, mcr->result);
2847c535e923SClaudiu Manoil ret = -EIO;
2848c535e923SClaudiu Manoil goto out;
2849c535e923SClaudiu Manoil }
2850c535e923SClaudiu Manoil break;
2851c535e923SClaudiu Manoil
2852c535e923SClaudiu Manoil case QM_MCR_NP_STATE_RETIRED:
2853c535e923SClaudiu Manoil /* Send OOS Command */
2854c535e923SClaudiu Manoil mcc = qm_mc_start(&p->p);
2855d6753c7eSClaudiu Manoil qm_fqid_set(&mcc->fq, fqid);
2856c535e923SClaudiu Manoil qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2857c535e923SClaudiu Manoil if (!qm_mc_result_timeout(&p->p, &mcr)) {
2858c535e923SClaudiu Manoil ret = -ETIMEDOUT;
2859c535e923SClaudiu Manoil goto out;
2860c535e923SClaudiu Manoil }
2861c535e923SClaudiu Manoil
2862c535e923SClaudiu Manoil DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2863c535e923SClaudiu Manoil QM_MCR_VERB_ALTER_OOS);
2864c5501aa9SRoy Pledge if (mcr->result != QM_MCR_RESULT_OK) {
2865c535e923SClaudiu Manoil dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
2866c535e923SClaudiu Manoil fqid, mcr->result);
2867c535e923SClaudiu Manoil ret = -EIO;
2868c535e923SClaudiu Manoil goto out;
2869c535e923SClaudiu Manoil }
2870c535e923SClaudiu Manoil break;
2871c535e923SClaudiu Manoil
2872c535e923SClaudiu Manoil case QM_MCR_NP_STATE_OOS:
2873c535e923SClaudiu Manoil /* Done */
2874c535e923SClaudiu Manoil break;
2875c535e923SClaudiu Manoil
2876c535e923SClaudiu Manoil default:
2877c535e923SClaudiu Manoil ret = -EIO;
2878c535e923SClaudiu Manoil }
2879c535e923SClaudiu Manoil
2880c535e923SClaudiu Manoil out:
2881c535e923SClaudiu Manoil put_affine_portal();
2882c535e923SClaudiu Manoil return ret;
2883c535e923SClaudiu Manoil }
2884c535e923SClaudiu Manoil
2885c535e923SClaudiu Manoil const struct qm_portal_config *qman_get_qm_portal_config(
2886c535e923SClaudiu Manoil struct qman_portal *portal)
2887c535e923SClaudiu Manoil {
2888c535e923SClaudiu Manoil return portal->config;
2889c535e923SClaudiu Manoil }
2890021ba010SClaudiu Manoil EXPORT_SYMBOL(qman_get_qm_portal_config);
2891c535e923SClaudiu Manoil
2892c535e923SClaudiu Manoil struct gen_pool *qm_fqalloc; /* FQID allocator */
2893c535e923SClaudiu Manoil struct gen_pool *qm_qpalloc; /* pool-channel allocator */
2894c535e923SClaudiu Manoil struct gen_pool *qm_cgralloc; /* CGR ID allocator */
2895c535e923SClaudiu Manoil
2896c535e923SClaudiu Manoil static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2897c535e923SClaudiu Manoil {
2898c535e923SClaudiu Manoil unsigned long addr;
2899c535e923SClaudiu Manoil
290064e9e22eSAlexandre Belloni if (!p)
290164e9e22eSAlexandre Belloni return -ENODEV;
290264e9e22eSAlexandre Belloni
2903c535e923SClaudiu Manoil addr = gen_pool_alloc(p, cnt);
2904c535e923SClaudiu Manoil if (!addr)
2905c535e923SClaudiu Manoil return -ENOMEM;
2906c535e923SClaudiu Manoil
2907c535e923SClaudiu Manoil *result = addr & ~DPAA_GENALLOC_OFF;
2908c535e923SClaudiu Manoil
2909c535e923SClaudiu Manoil return 0;
2910c535e923SClaudiu Manoil }
2911c535e923SClaudiu Manoil
2912c535e923SClaudiu Manoil int qman_alloc_fqid_range(u32 *result, u32 count)
2913c535e923SClaudiu Manoil {
2914c535e923SClaudiu Manoil return qman_alloc_range(qm_fqalloc, result, count);
2915c535e923SClaudiu Manoil }
2916c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_alloc_fqid_range);
2917c535e923SClaudiu Manoil
2918c535e923SClaudiu Manoil int qman_alloc_pool_range(u32 *result, u32 count)
2919c535e923SClaudiu Manoil {
2920c535e923SClaudiu Manoil return qman_alloc_range(qm_qpalloc, result, count);
2921c535e923SClaudiu Manoil }
2922c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_alloc_pool_range);
2923c535e923SClaudiu Manoil
2924c535e923SClaudiu Manoil int qman_alloc_cgrid_range(u32 *result, u32 count)
2925c535e923SClaudiu Manoil {
2926c535e923SClaudiu Manoil return qman_alloc_range(qm_cgralloc, result, count);
2927c535e923SClaudiu Manoil }
2928c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_alloc_cgrid_range);
2929c535e923SClaudiu Manoil
2930c535e923SClaudiu Manoil int qman_release_fqid(u32 fqid)
2931c535e923SClaudiu Manoil {
2932c535e923SClaudiu Manoil int ret = qman_shutdown_fq(fqid);
2933c535e923SClaudiu Manoil
2934c535e923SClaudiu Manoil if (ret) {
2935c535e923SClaudiu Manoil pr_debug("FQID %d leaked\n", fqid);
2936c535e923SClaudiu Manoil return ret;
2937c535e923SClaudiu Manoil }
2938c535e923SClaudiu Manoil
2939c535e923SClaudiu Manoil gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
2940c535e923SClaudiu Manoil return 0;
2941c535e923SClaudiu Manoil }
2942c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_release_fqid);
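
/*
 * Illustrative sketch, not part of the driver: borrowing a single FQID from
 * the dynamic allocator and handing it back. qman_release_fqid() shuts the
 * FQ down first, so the ID only returns to the pool if the shutdown
 * succeeds. example_with_fqid() is a hypothetical helper.
 */
static int __maybe_unused example_with_fqid(void)
{
	u32 fqid;
	int err;

	err = qman_alloc_fqid_range(&fqid, 1);
	if (err)
		return err;

	/* ... create, initialise and use an FQ on "fqid" here ... */

	return qman_release_fqid(fqid);
}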
2943c535e923SClaudiu Manoil
2944c535e923SClaudiu Manoil static int qpool_cleanup(u32 qp)
2945c535e923SClaudiu Manoil {
2946c535e923SClaudiu Manoil /*
2947c535e923SClaudiu Manoil * We query all FQDs starting from
2948c535e923SClaudiu Manoil * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
2949c535e923SClaudiu Manoil * whose destination channel is the pool-channel being released.
2950c535e923SClaudiu Manoil * When a non-OOS FQD is found we attempt to clean it up
2951c535e923SClaudiu Manoil */
2952c535e923SClaudiu Manoil struct qman_fq fq = {
2953c535e923SClaudiu Manoil .fqid = QM_FQID_RANGE_START
2954c535e923SClaudiu Manoil };
2955c535e923SClaudiu Manoil int err;
2956c535e923SClaudiu Manoil
2957c535e923SClaudiu Manoil do {
2958c535e923SClaudiu Manoil struct qm_mcr_queryfq_np np;
2959c535e923SClaudiu Manoil
2960c535e923SClaudiu Manoil err = qman_query_fq_np(&fq, &np);
2961d95cb0d3SClaudiu Manoil if (err == -ERANGE)
2962c535e923SClaudiu Manoil /* FQID range exceeded, found no problems */
2963c535e923SClaudiu Manoil return 0;
2964d95cb0d3SClaudiu Manoil else if (WARN_ON(err))
2965d95cb0d3SClaudiu Manoil return err;
2966d95cb0d3SClaudiu Manoil
2967c535e923SClaudiu Manoil if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2968c535e923SClaudiu Manoil struct qm_fqd fqd;
2969c535e923SClaudiu Manoil
2970c535e923SClaudiu Manoil err = qman_query_fq(&fq, &fqd);
2971c535e923SClaudiu Manoil if (WARN_ON(err))
2972d95cb0d3SClaudiu Manoil return err;
2973c535e923SClaudiu Manoil if (qm_fqd_get_chan(&fqd) == qp) {
2974c535e923SClaudiu Manoil /* The channel is the FQ's target, clean it */
2975c535e923SClaudiu Manoil err = qman_shutdown_fq(fq.fqid);
2976c535e923SClaudiu Manoil if (err)
2977c535e923SClaudiu Manoil /*
2978c535e923SClaudiu Manoil * Couldn't shut down the FQ
2979c535e923SClaudiu Manoil * so the pool must be leaked
2980c535e923SClaudiu Manoil */
2981c535e923SClaudiu Manoil return err;
2982c535e923SClaudiu Manoil }
2983c535e923SClaudiu Manoil }
2984c535e923SClaudiu Manoil /* Move to the next FQID */
2985c535e923SClaudiu Manoil fq.fqid++;
2986c535e923SClaudiu Manoil } while (1);
2987c535e923SClaudiu Manoil }
2988c535e923SClaudiu Manoil
2989c535e923SClaudiu Manoil int qman_release_pool(u32 qp)
2990c535e923SClaudiu Manoil {
2991c535e923SClaudiu Manoil int ret;
2992c535e923SClaudiu Manoil
2993c535e923SClaudiu Manoil ret = qpool_cleanup(qp);
2994c535e923SClaudiu Manoil if (ret) {
2995c535e923SClaudiu Manoil pr_debug("CHID %d leaked\n", qp);
2996c535e923SClaudiu Manoil return ret;
2997c535e923SClaudiu Manoil }
2998c535e923SClaudiu Manoil
2999c535e923SClaudiu Manoil gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
3000c535e923SClaudiu Manoil return 0;
3001c535e923SClaudiu Manoil }
3002c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_release_pool);
3003c535e923SClaudiu Manoil
3004c535e923SClaudiu Manoil static int cgr_cleanup(u32 cgrid)
3005c535e923SClaudiu Manoil {
3006c535e923SClaudiu Manoil /*
3007c535e923SClaudiu Manoil * query all FQDs starting from FQID 1 until we get an "invalid FQID"
3008c535e923SClaudiu Manoil * error, looking for non-OOS FQDs whose CGR is the CGR being released
3009c535e923SClaudiu Manoil */
3010c535e923SClaudiu Manoil struct qman_fq fq = {
3011d95cb0d3SClaudiu Manoil .fqid = QM_FQID_RANGE_START
3012c535e923SClaudiu Manoil };
3013c535e923SClaudiu Manoil int err;
3014c535e923SClaudiu Manoil
3015c535e923SClaudiu Manoil do {
3016c535e923SClaudiu Manoil struct qm_mcr_queryfq_np np;
3017c535e923SClaudiu Manoil
3018c535e923SClaudiu Manoil err = qman_query_fq_np(&fq, &np);
3019d95cb0d3SClaudiu Manoil if (err == -ERANGE)
3020c535e923SClaudiu Manoil /* FQID range exceeded, found no problems */
3021c535e923SClaudiu Manoil return 0;
3022d95cb0d3SClaudiu Manoil else if (WARN_ON(err))
3023d95cb0d3SClaudiu Manoil return err;
3024d95cb0d3SClaudiu Manoil
3025c535e923SClaudiu Manoil if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
3026c535e923SClaudiu Manoil struct qm_fqd fqd;
3027c535e923SClaudiu Manoil
3028c535e923SClaudiu Manoil err = qman_query_fq(&fq, &fqd);
3029c535e923SClaudiu Manoil if (WARN_ON(err))
3030d95cb0d3SClaudiu Manoil return err;
303118058822SClaudiu Manoil if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
3032c535e923SClaudiu Manoil fqd.cgid == cgrid) {
3033c535e923SClaudiu Manoil pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
3034c535e923SClaudiu Manoil cgrid, fq.fqid);
3035c535e923SClaudiu Manoil return -EIO;
3036c535e923SClaudiu Manoil }
3037c535e923SClaudiu Manoil }
3038c535e923SClaudiu Manoil /* Move to the next FQID */
3039c535e923SClaudiu Manoil fq.fqid++;
3040c535e923SClaudiu Manoil } while (1);
3041c535e923SClaudiu Manoil }
3042c535e923SClaudiu Manoil
3043c535e923SClaudiu Manoil int qman_release_cgrid(u32 cgrid)
3044c535e923SClaudiu Manoil {
3045c535e923SClaudiu Manoil int ret;
3046c535e923SClaudiu Manoil
3047c535e923SClaudiu Manoil ret = cgr_cleanup(cgrid);
3048c535e923SClaudiu Manoil if (ret) {
3049c535e923SClaudiu Manoil pr_debug("CGRID %d leaked\n", cgrid);
3050c535e923SClaudiu Manoil return ret;
3051c535e923SClaudiu Manoil }
3052c535e923SClaudiu Manoil
3053c535e923SClaudiu Manoil gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
3054c535e923SClaudiu Manoil return 0;
3055c535e923SClaudiu Manoil }
3056c535e923SClaudiu Manoil EXPORT_SYMBOL(qman_release_cgrid);
3057