/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <linux/bitops.h>

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0		0
#define QMAN_CHANNEL_POOL1		0x21
#define QMAN_CHANNEL_POOL1_REV3		0x401
extern u16 qm_channel_pool1;

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			 QM_PIRQ_MRI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}
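/*
 * An illustrative sketch (not part of the original documentation): pool
 * channel numbers start at qm_channel_pool1, so the conversion above maps
 * the first pool channel to QM_SDQCR_CHANNELS_POOL(1), eg.
 *
 *	// for the first pool channel:
 *	QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1) ==
 *		QM_SDQCR_CHANNELS_POOL(1);	// == 0x00004000
 */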
/* --- QMan data structures (and associated constants) --- */

/* "Frame Descriptor (FD)" */
struct qm_fd {
	union {
		struct {
			u8 cfg8b_w1;
			u8 bpid;	/* Buffer Pool ID */
			u8 cfg8b_w3;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		} __packed;
		__be64 data;
	};
	__be32 cfg;	/* format, offset, length / congestion */
	union {
		__be32 cmd;
		__be32 status;
	};
} __aligned(8);

#define QM_FD_FORMAT_SG		BIT(31)
#define QM_FD_FORMAT_LONG	BIT(30)
#define QM_FD_FORMAT_COMPOUND	BIT(29)
#define QM_FD_FORMAT_MASK	GENMASK(31, 29)
#define QM_FD_OFF_SHIFT		20
#define QM_FD_OFF_MASK		GENMASK(28, 20)
#define QM_FD_LEN_MASK		GENMASK(19, 0)
#define QM_FD_LEN_BIG_MASK	GENMASK(28, 0)

enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
	fd->addr_hi = upper_32_bits(addr);
	fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

/*
 * The 'format' field indicates the interpretation of the remaining
 * 29 bits of the 32-bit word.
 * If 'format' is _contig or _sg, 20b length and 9b offset.
 * If 'format' is _contig_big or _sg_big, 29b length.
 * If 'format' is _compound, 29b "congestion weight".
 */
static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
}

static inline int qm_fd_get_offset(const struct qm_fd *fd)
{
	return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
}

static inline int qm_fd_get_length(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
}

static inline int qm_fd_get_len_big(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
}

static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
				   int off, int len)
{
	fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
			      ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
}

#define qm_fd_set_contig(fd, off, len) \
	qm_fd_set_param(fd, qm_fd_contig, off, len)
#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
#define qm_fd_set_contig_big(fd, len) \
	qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)

static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
	fd->data = 0;
	fd->cfg = 0;
	fd->cmd = 0;
}
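/*
 * An illustrative sketch (not from the original documentation): building an
 * FD for a single contiguous buffer. 'dma', 'off' and 'len' are placeholders;
 * 'dma' is assumed to be a 40-bit-addressable DMA address.
 *
 *	struct qm_fd fd;
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_addr_set64(&fd, dma);
 *	qm_fd_set_contig(&fd, off, len);	// 9-bit off, 20-bit len
 */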
/* Scatter/Gather table entry */
struct qm_sg_entry {
	union {
		struct {
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		};
		__be64 data;
	};
	__be32 cfg;	/* E bit, F bit, length */
	u8 __reserved2;
	u8 bpid;
	__be16 offset;	/* 13-bit, _res[13-15] */
} __packed;

#define QM_SG_LEN_MASK	GENMASK(29, 0)
#define QM_SG_OFF_MASK	GENMASK(12, 0)
#define QM_SG_FIN	BIT(30)
#define QM_SG_EXT	BIT(31)

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
	sg->addr_hi = upper_32_bits(addr);
	sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_FIN;
}

static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_EXT;
}

static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
}

static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
}

static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
}

static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
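/*
 * An illustrative sketch (not from the original documentation): filling a
 * two-entry s/g table; the last entry must have the F (final) bit set.
 * 'sgt', 'dma0'/'len0' and 'dma1'/'len1' are placeholders, and 'sgt' is
 * assumed to point at DMA-able memory.
 *
 *	struct qm_sg_entry *sgt = ...;
 *
 *	memset(sgt, 0, 2 * sizeof(*sgt));
 *	qm_sg_entry_set64(&sgt[0], dma0);
 *	qm_sg_entry_set_len(&sgt[0], len0);
 *	qm_sg_entry_set64(&sgt[1], dma1);
 *	qm_sg_entry_set_f(&sgt[1], len1);	// final entry
 */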
/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
	u8 verb;
	u8 stat;
	__be16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	__be32 fqid;	/* 24-bit */
	__be32 context_b;
	struct qm_fd fd;
	u8 __reserved4[32];
} __packed;
#define QM_DQRR_VERB_VBIT		0x80
#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */

/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK	GENMASK(23, 0)
#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)

/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
	struct {
		u8 verb;
		u8 __reserved[63];
	};
	struct {
		u8 verb;
		u8 dca;
		__be16 seqnum;
		u8 rc;		/* Rej Code: 8-bit */
		u8 __reserved[3];
		__be32 fqid;	/* 24-bit */
		__be32 tag;
		struct qm_fd fd;
		u8 __reserved1[32];
	} __packed ern;
	struct {
		u8 verb;
		u8 fqs;		/* Frame Queue Status */
		u8 __reserved1[6];
		__be32 fqid;	/* 24-bit */
		__be32 context_b;
		u8 __reserved2[48];
	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
};
#define QM_MR_VERB_VBIT			0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK		0x27
#define QM_MR_VERB_DC_ERN		0x20
#define QM_MR_VERB_FQRN			0x21
#define QM_MR_VERB_FQRNI		0x22
#define QM_MR_VERB_FQRL			0x23
#define QM_MR_VERB_FQPN			0x24
#define QM_MR_RC_MASK			0xf0	/* contains one of; */
#define QM_MR_RC_CGR_TAILDROP		0x00
#define QM_MR_RC_WRED			0x10
#define QM_MR_RC_ERROR			0x20
#define QM_MR_RC_ORPWINDOW_EARLY	0x30
#define QM_MR_RC_ORPWINDOW_LATE		0x40
#define QM_MR_RC_FQ_TAILDROP		0x50
#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
#define QM_MR_RC_ORP_ZERO		0x70
#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
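/*
 * An illustrative sketch (not from the original documentation): demuxing an
 * MR entry in a message-ring handler; 'msg' is a placeholder pointer to a
 * union qm_mr_entry.
 *
 *	switch (msg->verb & QM_MR_VERB_TYPE_MASK) {
 *	case QM_MR_VERB_FQRN:
 *		// retirement notification; msg->fq.fqs carries
 *		// QM_MR_FQS_NOTEMPTY / QM_MR_FQS_ORLPRESENT
 *		break;
 *	case QM_MR_VERB_DC_ERN:
 *		// rejected frame; msg->ern.rc & QM_MR_RC_MASK gives the code
 *		break;
 *	}
 */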
/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result; it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
	u8 exclusive;
	/* Numbers of cachelines */
	u8 cl;	/* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
};

struct qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
	u8 oac;	/* oac[6-7], _res[0-5] */
	/* Two's-complement value (-128 to +127) */
	s8 oal;	/* "Overhead Accounting Length" */
};

struct qm_fqd {
	/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
	u8 orpc;
	u8 cgid;
	__be16 fq_ctrl;	/* See QM_FQCTRL_<...> */
	__be16 dest_wq;	/* channel[3-15], wq[0-2] */
	__be16 ics_cred; /* 15-bit */
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below)
	 * reflects the Overhead ACcounting values.
	 */
	union {
		__be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
		struct qm_fqd_oac oac_init;
	};
	__be32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		__be64 opaque;
		struct {
			__be32 hi;
			__be32 lo;
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct {
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			__be16 context_hi;
			__be32 context_lo;
		} __packed;
	} context_a;
	struct qm_fqd_oac oac_query;
} __packed;

#define QM_FQD_CHAN_OFF		3
#define QM_FQD_WQ_MASK		GENMASK(2, 0)
#define QM_FQD_TD_EXP_MASK	GENMASK(4, 0)
#define QM_FQD_TD_MANT_OFF	5
#define QM_FQD_TD_MANT_MASK	GENMASK(12, 5)
#define QM_FQD_TD_MAX		0xe0000000
#define QM_FQD_TD_MANT_MAX	0xff
#define QM_FQD_OAC_OFF		6
#define QM_FQD_AS_OFF		4
#define QM_FQD_DS_OFF		2
#define QM_FQD_XS_MASK		0x3

/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return qm_fqd_stashing_get64(fqd);
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
				      int roundup)
{
	u32 e = 0;
	int td, oddbit = 0;

	if (val > QM_FQD_TD_MAX)
		return -ERANGE;

	while (val > QM_FQD_TD_MANT_MAX) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}

	td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
	td |= (e & QM_FQD_TD_EXP_MASK);
	fqd->td = cpu_to_be16(td);
	return 0;
}
/* and the other direction */
static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
{
	int td = be16_to_cpu(fqd->td);

	return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
		<< (td & QM_FQD_TD_EXP_MASK);
}
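/*
 * A worked example (not from the original documentation) of the mant+exp
 * conversion above: a threshold of 300 exceeds QM_FQD_TD_MANT_MAX (255), so
 * it is halved once (mant = 150, exp = 1) and reads back exactly; values
 * that lose set bits while shifting are rounded down, or up if 'roundup'
 * is passed.
 *
 *	qm_fqd_set_taildrop(&fqd, 300, 0);
 *	qm_fqd_get_taildrop(&fqd);	// == 300
 *
 *	qm_fqd_set_taildrop(&fqd, 301, 0);
 *	qm_fqd_get_taildrop(&fqd);	// == 300 (rounded down)
 */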
static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
{
	struct qm_fqd_stashing *st = &fqd->context_a.stashing;

	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
		 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
		 (cs & QM_FQD_XS_MASK);
}

static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
{
	return fqd->context_a.stashing.cl;
}

static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
{
	fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
}

static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
{
	fqd->oac_init.oal = val;
}

static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
{
	fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
				   (wq & QM_FQD_WQ_MASK));
}

static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
}

static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}

/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...] */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS	0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG	0x1 /* Accounting for Congestion Groups */
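/*
 * An illustrative sketch (not from the original documentation): configure an
 * FQD for one cacheline of data stashing and one of FQ-context stashing.
 * 'initfq' and 'ctx_dma' are placeholders; the context address must be
 * cacheline-aligned.
 *
 *	initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CTXASTASHING);
 *	qm_fqd_set_stashing(&initfq.fqd, 0, 1, 1);	// as=0, ds=1, cs=1
 *	qm_fqd_stashing_set64(&initfq.fqd, ctx_dma);
 */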
/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *    MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
	__be32 word;
};
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
	/* _res[13-15], TA[5-12], Tn[0-4] */
	__be16 word;
};
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
			__be16 cscn_targ_dcp_low;
		};
		__be32 cscn_targ;	/* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN			0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit*/
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	int thres = be16_to_cpu(th->word);

	return ((thres >> 5) & 0xff) << (thres & 0x1f);
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
	return 0;
}
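/*
 * A worked example (not from the original documentation): a threshold of
 * 64KiB is halved nine times to reach the 8-bit mantissa range, storing
 * TA = 128 and Tn = 9, so reading it back is exact.
 *
 *	struct qm_cgr_cs_thres th;
 *
 *	qm_cgr_cs_thres_set64(&th, 64 * 1024, 0);
 *	qm_cgr_cs_thres_get64(&th);	// == 128 << 9 == 65536
 */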
/* "Initialize FQ" */
struct qm_mcc_initfq {
	u8 __reserved1[2];
	__be16 we_mask;		/* Write Enable Mask */
	__be32 fqid;		/* 24-bit */
	__be16 count;		/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd;	/* the FQD fields go here */
	u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
	u8 __reserved1[2];
	__be16 we_mask;		/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved3[32];
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC		0x0100
#define QM_INITFQ_WE_ORPC		0x0080
#define QM_INITFQ_WE_CGID		0x0040
#define QM_INITFQ_WE_FQCTRL		0x0020
#define QM_INITFQ_WE_DESTWQ		0x0010
#define QM_INITFQ_WE_ICSCRED		0x0008
#define QM_INITFQ_WE_TDTHRESH		0x0004
#define QM_INITFQ_WE_CONTEXTB		0x0002
#define QM_INITFQ_WE_CONTEXTA		0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G		0x0400
#define QM_CGR_WE_WR_PARM_Y		0x0200
#define QM_CGR_WE_WR_PARM_R		0x0100
#define QM_CGR_WE_WR_EN_G		0x0080
#define QM_CGR_WE_WR_EN_Y		0x0040
#define QM_CGR_WE_WR_EN_R		0x0020
#define QM_CGR_WE_CSCN_EN		0x0010
#define QM_CGR_WE_CSCN_TARG		0x0008
#define QM_CGR_WE_CSTD_EN		0x0004
#define QM_CGR_WE_CS_THRES		0x0002
#define QM_CGR_WE_MODE			0x0001

#define QMAN_CGR_FLAG_USE_INIT		0x00000001

/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
					struct qman_fq *fq,
					const struct qm_dqrr_entry *dqrr);

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg);

/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will
 * not be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};

#define QMAN_FQ_STATE_CHANGING	0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE	0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL	0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS	0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN	0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR	0x08000000 /* being volatile dequeued */
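/*
 * An illustrative sketch (not from the original documentation): a minimal
 * fast-path dequeue callback; 'process_frame' is a placeholder consumer.
 *
 *	static enum qman_cb_dqrr_result
 *	my_dqrr_cb(struct qman_portal *p, struct qman_fq *fq,
 *		   const struct qm_dqrr_entry *dq)
 *	{
 *		process_frame(&dq->fd);
 *		return qman_cb_dqrr_consume;
 *	}
 */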
/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *	   struct qman_fq base;
 *	   int an_extra_field;
 *	   [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify however
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */

struct qman_fq_cb {
	qman_cb_dqrr dqrr;	/* for dequeued frames */
	qman_cb_mr ern;		/* for s/w ERNs */
	qman_cb_mr fqs;		/* frame-queue state changes */
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;
	/*
	 * These are internal to the driver, don't touch. In particular, they
	 * may change, be removed, or extended (so you shouldn't rely on
	 * sizeof(qman_fq) being a constant).
	 */
	u32 fqid, idx;
	unsigned long flags;
	enum qman_fq_state state;
	int cgr_groupid;
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);

struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc. */
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan; /* portal channel this object is created on */
	struct list_head node;
};

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE	 0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY	 0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED	0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL	0x00000004 /* set dest portal */
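/*
 * An illustrative sketch (not from the original documentation): a congestion
 * callback suitable for qman_cgr::cb; 'pause_tx'/'resume_tx' are placeholders
 * for whatever backpressure mechanism the caller uses.
 *
 *	static void my_cgr_cb(struct qman_portal *p, struct qman_cgr *cgr,
 *			      int congested)
 *	{
 *		if (congested)
 *			pause_tx();
 *		else
 *			resume_tx();
 *	}
 */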
/* Portal Management */
/**
 * qman_p_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions).
 */
void qman_p_irqsource_add(struct qman_portal *p, u32 bits);

/**
 * qman_p_irqsource_remove - remove processing sources from being int-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);

/**
 * qman_affine_cpus - return a mask of cpus that have affine portals
 */
const cpumask_t *qman_affine_cpus(void);

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not
 * a member of the mask returned from qman_affine_cpus().
 */
u16 qman_affine_channel(int cpu);

/**
 * qman_get_affine_portal - return the portal pointer affine to cpu
 * @cpu: the cpu whose affine portal is the subject of the query
 */
struct qman_portal *qman_get_affine_portal(int cpu);

/**
 * qman_p_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * The return value represents the number of DQRR entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);

/**
 * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);

/* FQ management */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended
 * to adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with, it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any context_b modifications requested in the
 * qman_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed
 * by software portals, the context_b field is controlled by the driver and
 * can't be modified by the caller.
 */
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
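/*
 * An illustrative sketch (not from the original documentation): create an FQ
 * with a dynamically allocated FQID and schedule it to workqueue 3 of channel
 * 'ch'; 'myfq' follows the "struct my_fq" extension pattern shown earlier and
 * 'ch' is a placeholder channel number.
 *
 *	struct qm_mcc_initfq opts;
 *	int err;
 *
 *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &myfq->base);
 *	if (err)
 *		return err;
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
 *	qm_fqd_set_destwq(&opts.fqd, ch, 3);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
 *	err = qman_init_fq(&myfq->base, QMAN_INITFQ_FLAG_SCHED, &opts);
 */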
/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq())
 * is not deallocated but the caller regains ownership, to do with as desired.
 * The FQ must be in the 'out-of-service' or in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 *
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver;
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the
 * associated 'fqd' structure's 'context_b' field are sometimes overwritten;
 *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 *     initialised to a value used by the driver for demux.
 *   - if context_b is initialised for demux, so is context_a in case stashing
 *     is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * objects.)
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);

/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, taking it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);

/**
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
 *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 *
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags);
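/*
 * An illustrative sketch (not from the original documentation): the usual
 * teardown sequence; waiting for an asynchronous FQRN (delivered to cb.fqs)
 * is elided.
 *
 *	u32 flags;
 *	int err;
 *
 *	err = qman_retire_fq(fq, &flags);
 *	if (err == 1)
 *		// retirement started; wait for the FQRN on cb.fqs
 *	// once retired (and any ORL fragments consumed):
 *	err = qman_oos_fq(fq);
 *	qman_destroy_fq(fq);
 */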
/**
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 *
 * The frame queue must be retired and empty, and if any order restoration list
 * fragments were released as ERNs at the time of retirement, they must all be
 * consumed.
 */
int qman_oos_fq(struct qman_fq *fq);

/**
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 *
 * Fills an entry in the EQCR of the local portal to enqueue the frame
 * described by @fd. The descriptor details are copied from @fd to the EQCR
 * entry, the 'pid' field is ignored. The return value is non-zero on error,
 * such as ring full.
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);

/**
 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
 * @result: is set by the API to the base FQID of the allocated range
 * @count: the number of FQIDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_fqid_range(u32 *result, u32 count);
#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)

/**
 * qman_release_fqid - Release the specified frame queue ID
 * @fqid: the FQID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * FQID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_fqid(u32 fqid);

/* Pool-channel management */
/**
 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
 * @result: is set by the API to the base pool-channel ID of the allocated
 * range
 * @count: the number of pool-channel IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_pool_range(u32 *result, u32 count);
#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)

/**
 * qman_release_pool - Release the specified pool-channel ID
 * @id: the pool-chan ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * pool-channel ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_pool(u32 id);

/* CGR management */
/**
 * qman_create_cgr - Register a congestion group object
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: optional state of CGR settings
 *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If opts is NULL then
 * only the callback (cgr->cb) function is registered. If @flags contains
 * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
 * unspecified parameters) will be used rather than a modify hw command (which
 * only modifies the specified parameters).
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts);
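/*
 * An illustrative sketch (not from the original documentation): register a
 * CGR with congestion-state tail-drop enabled at a 64KiB threshold;
 * 'my_cgr_cb' is the callback sketched earlier and 'cgrid' a placeholder.
 *
 *	struct qm_mcc_initcgr opts;
 *	struct qman_cgr cgr;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES | QM_CGR_WE_CSTD_EN);
 *	opts.cgr.cstd_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 64 * 1024, 1);
 *	cgr.cgrid = cgrid;
 *	cgr.cb = my_cgr_cb;
 *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */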
/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the cpu on which this
 * API is executed. This must be executed on the same affine portal on which
 * it was created.
 */
int qman_delete_cgr(struct qman_cgr *cgr);

/**
 * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
 * @cgr: the 'cgr' object to deregister
 *
 * This will select the proper CPU and run qman_delete_cgr() there.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr);

/**
 * qman_query_cgr_congested - Queries CGR's congestion status
 * @cgr: the 'cgr' object to query
 * @result: returns 'cgr's congestion status, 1 (true) if congested
 */
int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);

/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count);
#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)

/**
 * qman_release_cgrid - Release the specified CGR ID
 * @id: the CGR ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * CGR ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_cgrid(u32 id);

#endif	/* __FSL_QMAN_H */