// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2019 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;    /* unaligned value from kmalloc() */
	unsigned int idx;      /* position of the next-to-be-returned entry */
	struct qbman_swp *swp; /* portal used to issue VDQCR */
	struct device *dev;    /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == -1, choose the current cpu, with no guarantees about
	 * potentially being migrated away.
	 */
	if (unlikely(cpu < 0))
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
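
/*
 * Illustrative sketch (not part of the driver): how a consumer might obtain
 * a DPIO service handle. Passing DPAA2_IO_ANY_CPU falls back to round-robin
 * over the dpio_list above, while a specific cpu id returns the affine
 * portal, if one was created for that cpu. The helper name below is
 * hypothetical.
 *
 *	static struct dpaa2_io *example_get_portal(int cpu)
 *	{
 *		struct dpaa2_io *io;
 *
 *		io = dpaa2_io_service_select(cpu);
 *		if (!io)
 *			io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
 *		return io;
 *	}
 */
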
/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}
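
/*
 * Illustrative sketch (not part of the driver): the DPIO platform driver is
 * expected to call dpaa2_io_irq() from its interrupt handler, passing the
 * dpaa2_io object it obtained from dpaa2_io_create(). The handler name and
 * the use of the irq cookie below are hypothetical.
 *
 *	static irqreturn_t example_dpio_irq_handler(int irq, void *arg)
 *	{
 *		struct dpaa2_io *io = arg;
 *
 *		return dpaa2_io_irq(io);
 *	}
 */
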
/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object.
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller)
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, -ENODEV if no DPIO service is available, or -EINVAL
 * if the device link cannot be added.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
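
/*
 * Illustrative sketch (not part of the driver): typical FQDAN registration.
 * The consumer fills in a notification context, registers it, and only then
 * issues the MC command that attaches the queue, using the dpio_id/qman64
 * values filled in by dpaa2_io_service_register(). The function names below
 * are hypothetical.
 *
 *	static void example_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
 *	{
 *		// e.g. schedule NAPI or a pull dequeue for the queue behind ctx
 *	}
 *
 *	static int example_setup_notification(struct dpaa2_io_notification_ctx *ctx,
 *					      u32 fqid, struct device *dev)
 *	{
 *		ctx->is_cdan = 0;
 *		ctx->id = fqid;
 *		ctx->desired_cpu = DPAA2_IO_ANY_CPU;
 *		ctx->cb = example_fqdan_cb;
 *
 *		return dpaa2_io_service_register(NULL, ctx, dev);
 *	}
 */
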
/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
 * this function achieves.
 *
 * Return 0 for success, or a negative error code on failure.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
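
/*
 * Illustrative sketch (not part of the driver): an enqueue can transiently
 * fail with -EBUSY while the enqueue ring is full, so callers commonly retry
 * a bounded number of times before dropping the frame. The helper name and
 * the retry count below are arbitrary examples.
 *
 *	static int example_enqueue(struct dpaa2_io *io, u32 fqid,
 *				   const struct dpaa2_fd *fd)
 *	{
 *		int i, err = -EBUSY;
 *
 *		for (i = 0; i < 10 && err == -EBUSY; i++)
 *			err = dpaa2_io_service_enqueue_fq(io, fqid, fd);
 *
 *		return err;
 *	}
 */
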
/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
					 u32 fqid,
					 const struct dpaa2_fd *fd,
					 int nb)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);

/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
					      u32 *fqid,
					      const struct dpaa2_fd *fd,
					      int nb)
{
	int i;
	struct qbman_eq_desc ed[32];

	d = service_select(d);
	if (!d)
		return -ENODEV;

	/* the on-stack descriptor array bounds a single call to 32 frames */
	if (nb > 32)
		return -EINVAL;

	for (i = 0; i < nb; i++) {
		qbman_eq_desc_clear(&ed[i]);
		qbman_eq_desc_set_no_orp(&ed[i], 0);
		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
	}

	return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
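
/*
 * Illustrative sketch (not part of the driver): seeding a buffer pool by
 * releasing DMA addresses in small batches, which is how existing DPAA2
 * consumers typically drive this API. The helper name, the source of the
 * addrs[] array and the batch size of 7 are assumptions for the example.
 *
 *	static int example_seed_pool(struct dpaa2_io *io, u16 bpid,
 *				     const u64 *addrs, unsigned int count)
 *	{
 *		unsigned int done = 0, n;
 *		int err;
 *
 *		while (done < count) {
 *			n = min_t(unsigned int, 7, count - done);
 *			err = dpaa2_io_service_release(io, bpid, addrs + done, n);
 *			if (err)
 *				return err;
 *			done += n;
 *		}
 *		return 0;
 *	}
 */
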
/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue results.
 * @max_frames: the maximum number of dequeue results, must be <= 32.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 32))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 * results.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
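
/*
 * Illustrative sketch (not part of the driver): a complete pull-dequeue cycle
 * using a store. The store is created once, reused for each pull command, and
 * every valid dequeue result is processed until is_last is set. The names
 * example_drain_fq() and example_consume_fd() are hypothetical.
 *
 *	static int example_drain_fq(struct dpaa2_io *io, u32 fqid,
 *				    struct dpaa2_io_store *store)
 *	{
 *		struct dpaa2_dq *dq;
 *		int is_last = 0;
 *		int err;
 *
 *		err = dpaa2_io_service_pull_fq(io, fqid, store);
 *		if (err)
 *			return err;
 *
 *		do {
 *			// NULL with !is_last means the result is not yet written
 *			dq = dpaa2_io_store_next(store, &is_last);
 *			if (dq)
 *				example_consume_fd(dpaa2_dq_fd(dq));
 *		} while (!is_last);
 *
 *		return 0;
 *	}
 */
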
/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting the
		 * caller to check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);

/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame- and byte-count are hereby returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 * buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
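
/*
 * Illustrative sketch (not part of the driver): the query helpers above are
 * mainly useful for debugging, e.g. dumping the instantaneous backlog of a
 * frame queue. The wrapper name and the pr_info() format are hypothetical.
 *
 *	static void example_dump_fq(struct dpaa2_io *io, u32 fqid)
 *	{
 *		u32 fcnt = 0, bcnt = 0;
 *
 *		if (!dpaa2_io_query_fq_count(io, fqid, &fcnt, &bcnt))
 *			pr_info("fq 0x%x: %u frames, %u bytes\n",
 *				fqid, fcnt, bcnt);
 *	}
 */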