/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
        void *cena_bar; /* Cache-enabled portal base address */
        void __iomem *cinh_bar; /* Cache-inhibited portal base address */
        u32 qman_version;
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
        u8 verb;
        u8 numf;
        u8 tok;
        u8 reserved;
        __le32 dq_src;
        __le64 rsp_addr;
        u64 rsp_addr_virt;
        u8 padding[40];
};

enum qbman_pull_type_e {
        /* dequeue with priority precedence, respect intra-class scheduling */
        qbman_pull_type_prio = 1,
        /* dequeue with active FQ precedence, respect ICS */
        qbman_pull_type_active,
        /* dequeue with active FQ precedence, no ICS */
        qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE      0x48
#define QBMAN_FQ_FORCE         0x49
#define QBMAN_FQ_XON           0x4d
#define QBMAN_FQ_XOFF          0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
        u8 verb;
        u8 dca;
        __le16 seqnum;
        __le16 orpid;
        __le16 reserved1;
        __le32 tgtid;
        __le32 tag;
        __le16 qdbin;
        u8 qpri;
        u8 reserved[3];
        u8 wae;
        u8 rspid;
        __le64 rsp_addr;
        u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        __le32 reserved2;
        __le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
        const struct qbman_swp_desc *desc;
        void *addr_cena;
        void __iomem *addr_cinh;

        /* Management commands */
        struct {
                u32 valid_bit; /* 0x00 or 0x80 */
        } mc;

        /* Push dequeues */
        u32 sdq;

        /* Volatile dequeues */
        struct {
                atomic_t available; /* indicates if a command can be sent */
                u32 valid_bit; /* 0x00 or 0x80 */
                struct dpaa2_dq *storage; /* NULL if DQRR */
        } vdq;

        /* DQRR */
        struct {
                u32 next_idx;
                u32 valid_bit;
                u8 dqrr_size;
                int reset_bug; /* indicates dqrr reset workaround is needed */
        } dqrr;
};

struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
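
/*
 * Illustrative sketch (not part of this header): how a driver might service
 * a portal interrupt with the status helpers above. The handler name and the
 * way the portal pointer is obtained are hypothetical; only the qbman_swp_*
 * calls come from this API.
 *
 *      static irqreturn_t my_portal_irq(int irq, void *arg)
 *      {
 *              struct qbman_swp *swp = arg;    // hypothetical binding
 *              u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *              if (!status)
 *                      return IRQ_NONE;
 *              // Acknowledge exactly the sources we observed (e.g. DQRI)
 *              qbman_swp_interrupt_clear_status(swp, status);
 *              return IRQ_HANDLED;
 *      }
 */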

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct dpaa2_dq *storage,
                                 dma_addr_t storage_phys,
                                 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                            enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                                 enum qbman_pull_type_e dct);

int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
                          u32 qd_bin, u32 qd_prio);

int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
                      const struct dpaa2_fd *fd);

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
                      const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
                      unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid, u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
                       u8 we_mask, u8 cdan_en, u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
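
/*
 * Illustrative sketch (not part of this header): enqueuing one frame to a
 * frame queue with the descriptor helpers above. my_swp, MY_FQID and my_fd
 * are hypothetical names supplied by the caller.
 *
 *      struct qbman_eq_desc ed;
 *      int ret;
 *
 *      qbman_eq_desc_clear(&ed);
 *      qbman_eq_desc_set_no_orp(&ed, 0);       // no order restoration
 *      qbman_eq_desc_set_fq(&ed, MY_FQID);
 *      ret = qbman_swp_enqueue(my_swp, &ed, my_fd);
 *      if (ret)
 *              ;       // enqueue ring busy: back off and retry
 */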

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - check if the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
        return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
        return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
        return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
        return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
        return le64_to_cpu(scn->scn.ctx);
}

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}
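
/*
 * Illustrative sketch (not part of this header): a volatile (pull) dequeue
 * followed by classification of the result with the qbman_result_is_*()
 * helpers above. my_swp, my_store, my_store_phys and MY_FQID are
 * hypothetical; my_store must be DMA-able memory owned by the caller.
 *
 *      struct qbman_pull_desc pd;
 *      const struct dpaa2_dq *dq = my_store;
 *
 *      qbman_pull_desc_clear(&pd);
 *      qbman_pull_desc_set_storage(&pd, my_store, my_store_phys, 1);
 *      qbman_pull_desc_set_numframes(&pd, 1);
 *      qbman_pull_desc_set_fq(&pd, MY_FQID);
 *      if (qbman_swp_pull(my_swp, &pd))
 *              ;       // portal busy: retry later
 *
 *      while (!qbman_result_has_new_result(my_swp, dq))
 *              ;       // poll until hardware writes the response
 *
 *      if (qbman_result_is_DQ(dq))
 *              ;       // a real frame dequeue: process the FD
 *      else if (qbman_result_is_SCN(dq))
 *              ;       // a state-change notification instead
 */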

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
        return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware. A sketch of this workflow follows the CDAN helpers
 * below.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s: the software portal object
 * @channelid: the channel index
 * @ctx: the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
                                             u64 ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_CTX,
                                  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN,
                                  0, 0);
}
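
/*
 * Illustrative sketch (not part of this header): the CDAN workflow described
 * in the block comment above. my_swp, MY_CHANNEL and my_ctx are hypothetical;
 * error handling is elided.
 *
 *      // One-time arming of the channel
 *      qbman_swp_CDAN_set_context(my_swp, MY_CHANNEL, my_ctx);
 *      qbman_swp_CDAN_enable(my_swp, MY_CHANNEL);
 *
 *      // Later, after a CDAN arrives and its pull dequeues are done,
 *      // re-arm (optionally updating the context in the same command):
 *      qbman_swp_CDAN_set_context_enable(my_swp, MY_CHANNEL, my_ctx);
 */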

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx: the context set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
                                                    u16 channelid,
                                                    u64 ctx)
{
        return qbman_swp_CDAN_set(s, channelid,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
                                          u8 cmd_verb)
{
        int loopvar = 1000;

        qbman_swp_mc_submit(swp, cmd, cmd_verb);

        do {
                cmd = qbman_swp_mc_result(swp);
                /* pre-decrement so loopvar == 0 reliably flags a timeout */
        } while (!cmd && --loopvar);

        WARN_ON(!loopvar);

        return cmd;
}

/* Query APIs */
struct qbman_fq_query_np_rslt {
        u8 verb;
        u8 rslt;
        u8 st1;
        u8 st2;
        u8 reserved[2];
        __le16 od1_sfdr;
        __le16 od2_sfdr;
        __le16 od3_sfdr;
        __le16 ra1_sfdr;
        __le16 ra2_sfdr;
        __le32 pfdr_hptr;
        __le32 pfdr_tptr;
        __le32 frm_cnt;
        __le32 byte_cnt;
        __le16 ics_surp;
        u8 is;
        u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
                         struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);

struct qbman_bp_query_rslt {
        u8 verb;
        u8 rslt;
        u8 reserved[4];
        u8 bdi;
        u8 state;
        __le32 fill;
        __le32 hdotr;
        __le16 swdet;
        __le16 swdxt;
        __le16 hwdet;
        __le16 hwdxt;
        __le16 swset;
        __le16 swsxt;
        __le16 vbpid;
        __le16 icid;
        __le64 bpscn_addr;
        __le64 bpscn_ctx;
        __le16 hw_targ;
        u8 dbe;
        u8 reserved2;
        u8 sdcnt;
        u8 hdcnt;
        u8 sscnt;
        u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
                   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);

#endif /* __FSL_QBMAN_PORTAL_H */