// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI     0x800
#define QBMAN_CINH_SWP_EQCR_CI     0x840
#define QBMAN_CINH_SWP_EQAR        0x8c0
#define QBMAN_CINH_SWP_CR_RT       0x900
#define QBMAN_CINH_SWP_VDQCR_RT    0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT  0x980
#define QBMAN_CINH_SWP_RCR_AM_RT   0x9c0
#define QBMAN_CINH_SWP_DQPI        0xa00
#define QBMAN_CINH_SWP_DCAP        0xac0
#define QBMAN_CINH_SWP_SDQCR       0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI      0xc00
#define QBMAN_CINH_SWP_RAR         0xcc0
#define QBMAN_CINH_SWP_ISR         0xe00
#define QBMAN_CINH_SWP_IER         0xe40
#define QBMAN_CINH_SWP_ISDR        0xe80
#define QBMAN_CINH_SWP_IIR         0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)  (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM      0x1600
#define QBMAN_CENA_SWP_RR_MEM      0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM   0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
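
/*
 * Worked example (illustrative only): DQRR entry n lives at CENA offset
 * 0x200 + n * 64, so for n = 0..7 the entry address carries the index in
 * bits [8:6]. Masking with 0x1ff strips the 0x200 base, and the shift by 6
 * recovers n.
 */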

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT  29
#define QB_SDQCR_FC_MASK   0x1
#define QB_SDQCR_DCT_SHIFT 24
#define QB_SDQCR_DCT_MASK  0x3
#define QB_SDQCR_TOK_SHIFT 16
#define QB_SDQCR_TOK_MASK  0xff
#define QB_SDQCR_SRC_SHIFT 0
#define QB_SDQCR_SRC_MASK  0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN 0xbb

#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START   32

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal Function declaration */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
						  const struct qbman_eq_desc *d,
						  const struct dpaa2_fd *fd,
						  int num_frames);
static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
						    const struct qbman_eq_desc *d,
						    const struct dpaa2_fd *fd,
						    int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
	= qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG 0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE 0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}
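
/*
 * Worked example (illustrative only): the EQCR producer/consumer counters
 * carry one bit more than the ring index (pi_ci_mask is 0xf for an 8-entry
 * ring, 0x3f for a 32-entry ring), so the distance is computed modulo
 * 2 * ringsize. With ringsize = 8, first = 14 and last = 2, the counters
 * have wrapped, and qm_cyc_diff() returns (2 * 8) - (14 - 2) = 4 entries.
 */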

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}
	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
		     & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}

/**
 * qbman_swp_interrupt_read_status() - read the interrupt status register
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - clear interrupt status bits
 * @p: the given software portal
 * @mask: the mask to clear in the SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: the mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit (mask) all portal interrupts
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
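
/*
 * A minimal interrupt-handling sketch (illustrative only, not part of this
 * driver; the handler name and the omitted processing step are assumptions,
 * and irqreturn_t needs <linux/interrupt.h>). The usual pattern is: read
 * the status, process the causes, then clear them.
 */
#if 0
static irqreturn_t example_swp_irq(int irq, void *arg)
{
	struct qbman_swp *swp = arg;
	u32 status = qbman_swp_interrupt_read_status(swp);

	if (!status)
		return IRQ_NONE;

	/* ... process DQRR entries and ring completions here ... */

	qbman_swp_interrupt_clear_status(swp, status);
	return IRQ_HANDLED;
}
#endif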

/*
 * Different management commands all use this common base layer of code to
 * issue commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management
 * command (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}

/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero.
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
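
/*
 * The start/submit/result trio is normally driven through a
 * qbman_swp_mc_complete() helper (see qbman-portal.h). A minimal sketch of
 * that pattern (illustrative only; a real caller would bound the busy-wait):
 */
#if 0
static void *example_mc_complete(struct qbman_swp *s, void *cmd, u8 verb)
{
	void *resp;

	qbman_swp_mc_submit(s, cmd, verb);
	do {
		resp = qbman_swp_mc_result(s);
	} while (!resp);

	return resp;
}
#endif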

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d: the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d: the enqueue descriptor
 * @qdid: the id of the queuing destination to be enqueued
 * @qd_bin: the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
						   u8 idx)
{
	if (idx < 16)
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
				     QMAN_RT_MODE);
	else
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
				     (idx - 16) * 4,
				     QMAN_RT_MODE);
}

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}
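
/*
 * Single-frame enqueue usage sketch (illustrative only; 'my_fqid' and the
 * portal/frame-descriptor variables are assumptions, and qbman_swp_enqueue()
 * is assumed to be the inline wrapper in qbman-portal.h that dispatches
 * through qbman_swp_enqueue_ptr):
 */
#if 0
static int example_enqueue_one(struct qbman_swp *swp,
			       const struct dpaa2_fd *fd, u32 my_fqid)
{
	struct qbman_eq_desc eqd;

	qbman_eq_desc_clear(&eqd);
	qbman_eq_desc_set_no_orp(&eqd, 0);	/* rejections go back to a FQ */
	qbman_eq_desc_set_fq(&eqd, my_fqid);

	return qbman_swp_enqueue(swp, &eqd, fd);	/* 0 or -EBUSY */
}
#endif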

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 *                                       using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *eq_desc =
				(struct qbman_eq_desc *)p;

			eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				       (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the software producer index */
	s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
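
/*
 * Batch enqueue usage sketch (illustrative only; qbman_swp_enqueue_multiple()
 * is assumed to be the inline wrapper in qbman-portal.h that dispatches
 * through qbman_swp_enqueue_multiple_ptr). A partial write is normal: the
 * return value says how many frames fit into EQCR, and the caller retries
 * the rest.
 */
#if 0
static int example_enqueue_burst(struct qbman_swp *swp,
				 const struct qbman_eq_desc *eqd,
				 const struct dpaa2_fd *fds, int count)
{
	int sent = 0;

	while (sent < count) {
		int ret = qbman_swp_enqueue_multiple(swp, eqd, fds + sent,
						     NULL, count - sent);
		if (ret < 0)
			return ret;	/* hard error */
		sent += ret;		/* ret == 0 means EQCR is full; retry */
	}
	return sent;
}
#endif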

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 *                                         using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&s->access_spinlock, irq_flags);

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *eq_desc =
				(struct qbman_eq_desc *)p;

			eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				       (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);
	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);

	return num_enqueued;
}
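
/*
 * Note the difference from the direct variant above (illustrative summary):
 * in memory-backed mode the EQCR entries live in DMA-able memory, so after
 * the valid bits are written the producer index must be pushed to the portal
 * explicitly through the cache-inhibited EQCR_PI register (with QB_RT_BIT
 * set), whereas in direct mode writing the verb/valid byte into the
 * cache-enabled area is itself what publishes the entry.
 */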

/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 *                                            using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the software producer index */
	s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 *                                              using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);

	return num_enqueued;
}

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s: the software portal object
 * @channel_idx: the channel index to query
 * @enabled: returned boolean to show whether the push dequeue is enabled
 *           for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = !!(src & (1 << channel_idx));
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s: the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable: enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
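
/*
 * Push-dequeue setup sketch (illustrative only; channel index 0 and the
 * portal variable are assumptions). Once a channel is enabled, dequeued
 * frames start arriving in DQRR and are consumed with the DQRR functions
 * further below.
 */
#if 0
static void example_enable_push_dequeue(struct qbman_swp *swp)
{
	int enabled;

	qbman_swp_push_set(swp, 0, 1);		/* enable channel index 0 */
	qbman_swp_push_get(swp, 0, &enabled);	/* enabled is now 1 */
}
#endif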

#define QB_VDQCR_VERB_DCT_SHIFT 0
#define QB_VDQCR_VERB_DT_SHIFT  2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d: the pull dequeue descriptor to be set
 * @storage: the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash: to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d: the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d: the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
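
/*
 * Volatile (pull) dequeue usage sketch (illustrative only; 'storage',
 * 'storage_phys' and 'my_fqid' are assumptions, and qbman_swp_pull() is
 * assumed to be the inline wrapper in qbman-portal.h that dispatches through
 * qbman_swp_pull_ptr). The storage must stay DMA-mapped until the results
 * have been seen by qbman_result_has_new_result() further below.
 */
#if 0
static int example_pull_from_fq(struct qbman_swp *swp, u32 my_fqid,
				struct dpaa2_dq *storage,
				dma_addr_t storage_phys)
{
	struct qbman_pull_desc pd;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 16);	/* up to 16 frames */
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_fq(&pd, my_fqid);

	return qbman_swp_pull(swp, &pd);	/* 0 or -EBUSY */
}
#endif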

/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}

#define QMAN_DQRR_PI_MASK 0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
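
/*
 * DQRR drain-loop sketch (illustrative only; the frame-handling step is an
 * assumption, and qbman_swp_dqrr_next() is assumed to be the inline wrapper
 * in qbman-portal.h that dispatches through qbman_swp_dqrr_next_ptr). Every
 * entry handed out must eventually be returned with qbman_swp_dqrr_consume(),
 * defined further below.
 */
#if 0
static void example_drain_dqrr(struct qbman_swp *swp)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		/* ... process dpaa2_dq_fd(dq) here ... */
		qbman_swp_dqrr_consume(swp, dq);
	}
}
#endif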

/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
As such, once the user has called qbman_result_has_new_result() and 1383 * been returned a valid dequeue result, they should not call it again on 1384 * the same memory location (except of course if another dequeue command has 1385 * been executed to produce a new result to that location). 1386 */ 1387 int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq) 1388 { 1389 if (dq->dq.tok != QMAN_DQ_TOKEN_VALID) 1390 return 0; 1391 1392 /* 1393 * Set token to be 0 so we will detect change back to 1 1394 * next time the looping is traversed. Const is cast away here 1395 * as we want users to treat the dequeue responses as read only. 1396 */ 1397 ((struct dpaa2_dq *)dq)->dq.tok = 0; 1398 1399 /* 1400 * Determine whether VDQCR is available based on whether the 1401 * current result is sitting in the first storage location of 1402 * the busy command. 1403 */ 1404 if (s->vdq.storage == dq) { 1405 s->vdq.storage = NULL; 1406 atomic_inc(&s->vdq.available); 1407 } 1408 1409 return 1; 1410 } 1411 1412 /** 1413 * qbman_release_desc_clear() - Clear the contents of a descriptor to 1414 * default/starting state. 1415 */ 1416 void qbman_release_desc_clear(struct qbman_release_desc *d) 1417 { 1418 memset(d, 0, sizeof(*d)); 1419 d->verb = 1 << 5; /* Release Command Valid */ 1420 } 1421 1422 /** 1423 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to 1424 */ 1425 void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid) 1426 { 1427 d->bpid = cpu_to_le16(bpid); 1428 } 1429 1430 /** 1431 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI 1432 * interrupt source should be asserted after the release command is completed. 1433 */ 1434 void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) 1435 { 1436 if (enable) 1437 d->verb |= 1 << 6; 1438 else 1439 d->verb &= ~(1 << 6); 1440 } 1441 1442 #define RAR_IDX(rar) ((rar) & 0x7) 1443 #define RAR_VB(rar) ((rar) & 0x80) 1444 #define RAR_SUCCESS(rar) ((rar) & 0x100) 1445 1446 /** 1447 * qbman_swp_release_direct() - Issue a buffer release command 1448 * @s: the software portal object 1449 * @d: the release descriptor 1450 * @buffers: a pointer pointing to the buffer address to be released 1451 * @num_buffers: number of buffers to be released, must be less than 8 1452 * 1453 * Return 0 for success, -EBUSY if the release command ring is not ready. 1454 */ 1455 int qbman_swp_release_direct(struct qbman_swp *s, 1456 const struct qbman_release_desc *d, 1457 const u64 *buffers, unsigned int num_buffers) 1458 { 1459 int i; 1460 struct qbman_release_desc *p; 1461 u32 rar; 1462 1463 if (!num_buffers || (num_buffers > 7)) 1464 return -EINVAL; 1465 1466 rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR); 1467 if (!RAR_SUCCESS(rar)) 1468 return -EBUSY; 1469 1470 /* Start the release command */ 1471 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 1472 1473 /* Copy the caller's buffer pointers to the command */ 1474 for (i = 0; i < num_buffers; i++) 1475 p->buf[i] = cpu_to_le64(buffers[i]); 1476 p->bpid = d->bpid; 1477 1478 /* 1479 * Set the verb byte, have to substitute in the valid-bit 1480 * and the number of buffers. 

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d: the release descriptor
 * @bpid: the id of the buffer pool
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 *                                 interrupt source should be asserted after
 *                                 the release command is completed.
 * @d: the release descriptor
 * @enable: assert (1) or do not assert (0) the RCDI interrupt source
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}
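
/*
 * Buffer release usage sketch (illustrative only; 'my_bpid' and the buffer
 * addresses are assumptions, and qbman_swp_release() is assumed to be the
 * inline wrapper in qbman-portal.h that dispatches through
 * qbman_swp_release_ptr). Callers typically retry on -EBUSY since RCR slots
 * recycle quickly.
 */
#if 0
static int example_release_buffers(struct qbman_swp *swp, u16 my_bpid,
				   const u64 *addrs, unsigned int n)
{
	struct qbman_release_desc rd;
	int ret;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, my_bpid);

	do {
		ret = qbman_swp_release(swp, &rd, addrs, n);	/* n < 8 */
	} while (ret == -EBUSY);

	return ret;
}
#endif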

/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}

struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s: the software portal object
 * @bpid: the buffer pool index
 * @buffers: a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, or a negative error code if the
 * acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
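
/*
 * Buffer acquire usage sketch (illustrative only; 'my_bpid' is an
 * assumption). The return value is the number of buffers actually granted,
 * which may be fewer than requested when the pool is running low.
 */
#if 0
static void example_acquire_buffers(struct qbman_swp *swp, u16 my_bpid)
{
	u64 addrs[7];
	int got = qbman_swp_acquire(swp, my_bpid, addrs, 7);

	if (got > 0) {
		/* ... use addrs[0..got-1]; release them back when done ... */
	}
}
#endif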

struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}

struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}

#define QBMAN_RESPONSE_VERB_MASK 0x7f
#define QBMAN_FQ_QUERY_NP        0x45
#define QBMAN_BP_QUERY           0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}

struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}
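
/*
 * Query usage sketch (illustrative only; 'my_fqid' and 'my_bpid' are
 * assumptions). The *_rslt structures are filled by the query helpers and
 * then decoded with the accessors above.
 */
#if 0
static void example_query_state(struct qbman_swp *swp, u32 my_fqid,
				u16 my_bpid)
{
	struct qbman_fq_query_np_rslt fqr;
	struct qbman_bp_query_rslt bpr;

	if (!qbman_fq_query_state(swp, my_fqid, &fqr))
		pr_info("FQ holds %u frames / %u bytes\n",
			qbman_fq_state_frame_count(&fqr),
			qbman_fq_state_byte_count(&fqr));

	if (!qbman_bp_query(swp, my_bpid, &bpr))
		pr_info("BP has %u free buffers\n",
			qbman_bp_info_num_free_bufs(&bpr));
}
#endif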