// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI     0x800
#define QBMAN_CINH_SWP_EQCR_CI     0x840
#define QBMAN_CINH_SWP_EQAR        0x8c0
#define QBMAN_CINH_SWP_CR_RT       0x900
#define QBMAN_CINH_SWP_VDQCR_RT    0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT  0x980
#define QBMAN_CINH_SWP_RCR_AM_RT   0x9c0
#define QBMAN_CINH_SWP_DQPI        0xa00
#define QBMAN_CINH_SWP_DCAP        0xac0
#define QBMAN_CINH_SWP_SDQCR       0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI      0xc00
#define QBMAN_CINH_SWP_RAR         0xcc0
#define QBMAN_CINH_SWP_ISR         0xe00
#define QBMAN_CINH_SWP_IER         0xe40
#define QBMAN_CINH_SWP_ISDR        0xe80
#define QBMAN_CINH_SWP_IIR         0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)  (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM      0x1600
#define QBMAN_CENA_SWP_RR_MEM      0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM   0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK 0x0f
#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START   32

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal function declarations */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int
qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd,
				    uint32_t *flags,
				    int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					 const struct qbman_eq_desc *d,
					 const struct dpaa2_fd *fd,
					 int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
	= qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG 0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE 0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return a qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {

		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}
	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
		     & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}
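
/*
 * Example (illustrative only, not part of the driver): bringing up a portal.
 * The descriptor fields shown (cena_bar, cinh_bar, qman_version) are the ones
 * consumed above; where the mappings and version come from is an assumption
 * of this sketch - in the kernel, the DPIO driver fills them in from the MC
 * firmware objects.
 *
 *	struct qbman_swp_desc pd;
 *	struct qbman_swp *swp;
 *
 *	pd.cena_bar = cena_regs;	// cache-enabled portal mapping
 *	pd.cinh_bar = cinh_regs;	// cache-inhibited portal mapping
 *	pd.qman_version = qman_version;	// read from the DPIO attributes
 *
 *	swp = qbman_swp_init(&pd);
 *	if (!swp)
 *		return -ENODEV;
 */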

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}

/**
 * qbman_swp_interrupt_read_status() - Read the interrupt status register
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - Clear bits in the interrupt status
 * @p: the given software portal
 * @mask: The mask to clear in the SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit (mask) portal interrupts
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
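
/*
 * Example (illustrative only): a typical portal interrupt flow. SWP_ISR is
 * write-one-to-clear, and SWP_IIR masks the portal while dequeues are
 * processed in polled/NAPI context; the exact flow is an assumption of this
 * sketch, not a fixed contract of these accessors.
 *
 *	u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *	if (!status)
 *		return IRQ_NONE;
 *	qbman_swp_interrupt_set_inhibit(swp, 1);	// mask while polling
 *	// ... schedule and run dequeue processing ...
 *	qbman_swp_interrupt_clear_status(swp, status);	// ack handled bits
 *	qbman_swp_interrupt_set_inhibit(swp, 0);	// unmask again
 */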

/*
 * Different management commands all use this common base layer of code to
 * issue commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management
 * command (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to the hardware.
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}

/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero.
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
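
/*
 * Example (illustrative only): the start/submit/poll pattern that every
 * management command in this file follows. This mirrors the
 * qbman_swp_mc_complete() helper in qbman-portal.h; the bounded retry count
 * and the MY_CMD_VERB name are assumptions of this sketch.
 *
 *	void *cmd, *rslt;
 *	int retries = 2000;
 *
 *	cmd = qbman_swp_mc_start(swp);
 *	if (!cmd)
 *		return -EBUSY;
 *	// ... fill in the command fields, leaving the verb byte alone ...
 *	qbman_swp_mc_submit(swp, cmd, MY_CMD_VERB);
 *	do {
 *		rslt = qbman_swp_mc_result(swp);	// NULL until complete
 *	} while (!rslt && --retries);
 */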

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * -enqueue to a frame queue
 * -enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d: the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d: the enqueue descriptor
 * @qdid: the id of the queuing destination to be enqueued
 * @qd_bin: the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	uint32_t flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	uint32_t flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}
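
/*
 * Example (illustrative only): enqueuing one frame to a frame queue through
 * the qbman_swp_enqueue() wrapper in qbman-portal.h, which dispatches via
 * qbman_swp_enqueue_ptr. The fqid and the already-built FD are assumptions
 * of this sketch.
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);	// rejections go back to a FQ
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	if (qbman_swp_enqueue(swp, &ed, &fd) == -EBUSY)
 *		;	// EQCR full: back off and retry
 */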

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 *                                       using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
		       s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *eq = (struct qbman_eq_desc *)p;

			eq->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				  (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the EQCR producer index */
	s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 *                                         using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&s->access_spinlock, irq_flags);

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
		       s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *eq = (struct qbman_eq_desc *)p;

			eq->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				  (flags[i] & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     QB_RT_BIT | s->eqcr.pi | s->eqcr.pi_vb);
	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);

	return num_enqueued;
}
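
/*
 * Example (illustrative only): batch enqueue with partial-progress handling.
 * qbman_swp_enqueue_multiple() in qbman-portal.h dispatches through
 * qbman_swp_enqueue_multiple_ptr; fds[], n and the single shared descriptor
 * are assumptions of this sketch. The call returns how many frames fit in
 * the EQCR, which may be fewer than requested.
 *
 *	int done = 0;
 *
 *	while (done < n) {
 *		int ret = qbman_swp_enqueue_multiple(swp, &ed, &fds[done],
 *						     NULL, n - done);
 *		if (ret < 0)
 *			break;		// error
 *		if (!ret)
 *			cpu_relax();	// ring full, try again
 *		done += ret;
 *	}
 */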

/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 *                                            using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
		       s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Advance the EQCR producer index */
	s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 *                                              using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
		       s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     QB_RT_BIT | s->eqcr.pi | s->eqcr.pi_vb);

	return num_enqueued;
}

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s: the software portal object
 * @channel_idx: the channel index to query
 * @enabled: returned boolean to show whether the push dequeue is enabled
 *           for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = src & (1 << channel_idx);
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s: the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable: enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
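
/*
 * Example (illustrative only): enabling push dequeue for one channel. Once
 * any channel bit is set, qbman_swp_push_set() writes the full SDQCR value
 * built in qbman_swp_init() (dequeue type, token, flow control) to the
 * hardware, and entries then arrive in the DQRR without explicit pull
 * commands.
 *
 *	int enabled;
 *
 *	qbman_swp_push_set(swp, 0, 1);		// dequeue from channel 0
 *	qbman_swp_push_get(swp, 0, &enabled);	// enabled is now non-zero
 */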

#define QB_VDQCR_VERB_DCT_SHIFT 0
#define QB_VDQCR_VERB_DT_SHIFT  2
#define QB_VDQCR_VERB_RLS_SHIFT 4
#define QB_VDQCR_VERB_WAE_SHIFT 5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 * @d: the pull dequeue descriptor to be set
 * @storage: the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash: to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d: the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d: the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}

/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}
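
/*
 * Example (illustrative only): a volatile (pull) dequeue into caller-provided
 * storage. The dpaa2_dq ring (store_virt/store_phys) is assumed to come from
 * a DMA-coherent allocation; qbman_swp_pull() is the dispatching wrapper in
 * qbman-portal.h, and qbman_result_has_new_result() (defined below) detects
 * each response as it lands.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_storage(&pd, store_virt, store_phys, 1);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -EBUSY;	// a previous volatile dequeue is in flight
 *	while (!qbman_result_has_new_result(swp, store_virt))
 *		cpu_relax();	// wait for the first response to land
 */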

#define QMAN_DQRR_PI_MASK 0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
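
/*
 * Example (illustrative only): draining the DQRR. qbman_swp_dqrr_next() is
 * the dispatching wrapper in qbman-portal.h and dpaa2_dq_fd() comes from
 * soc/fsl/dpaa2-global.h; what "process" means is up to the caller (the DPIO
 * service layer hands the FD to the registered notification context).
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);
 *
 *		// ... process fd ...
 *		qbman_swp_dqrr_consume(swp, dq);	// hand the entry back
 *	}
 */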

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s: the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d: the release descriptor
 * @bpid: the id of the buffer pool
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 *                                 interrupt source should be asserted after
 *                                 the release command is completed.
 * @d: the release descriptor
 * @enable: whether to enable the RCDI interrupt source
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer to an array of the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}

/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer to an array of the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}
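
/*
 * Example (illustrative only): seeding a buffer pool with a batch of buffers.
 * The DMA addresses in buf_array[] are assumed to come from dma_map_page() or
 * similar; qbman_swp_release() is the dispatching wrapper in qbman-portal.h.
 *
 *	struct qbman_release_desc rd;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	while (qbman_swp_release(swp, &rd, buf_array, 7) == -EBUSY)
 *		cpu_relax();	// RCR slot not yet available
 */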

struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s: the software portal object
 * @bpid: the buffer pool index
 * @buffers: a pointer to the array into which the acquired buffer addresses
 *           are written
 * @num_buffers: number of buffers to be acquired, must be between 1 and 7,
 *               inclusive
 *
 * Return the number of buffers acquired, or a negative error code if the
 * acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
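
/*
 * Example (illustrative only): pulling buffers back out of a pool, e.g. when
 * draining it on teardown. The return value is the count actually handed
 * back, which can be fewer than requested as the pool empties; the exact
 * behaviour on an empty pool is an assumption of this sketch.
 *
 *	u64 addrs[7];
 *	int n;
 *
 *	do {
 *		n = qbman_swp_acquire(swp, bpid, addrs, 7);
 *		// ... unmap and free the n buffers in addrs[] ...
 *	} while (n > 0);
 */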

struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}

struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN for channel %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}

#define QBMAN_RESPONSE_VERB_MASK 0x7f
#define QBMAN_FQ_QUERY_NP        0x45
#define QBMAN_BP_QUERY           0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}

struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}
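
/*
 * Example (illustrative only): checking queue backlog and pool fill level,
 * e.g. for statistics reporting. The result structs are defined in
 * qbman-portal.h; fqid and bpid are assumptions of this sketch.
 *
 *	struct qbman_fq_query_np_rslt fq_state;
 *	struct qbman_bp_query_rslt bp_state;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &fq_state))
 *		pr_info("FQ %u: %u frames, %u bytes\n", fqid,
 *			qbman_fq_state_frame_count(&fq_state),
 *			qbman_fq_state_byte_count(&fq_state));
 *	if (!qbman_bp_query(swp, bpid, &bp_state))
 *		pr_info("BP %u: %u free buffers\n", bpid,
 *			qbman_bp_info_num_free_bufs(&bp_state));
 */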