// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;	/* unaligned value from kmalloc() */
	unsigned int idx;	/* position of the next-to-be-returned entry */
	struct qbman_swp *swp;	/* portal used to issue VDQCR */
	struct device *dev;	/* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == -1, choose the current cpu, with no guarantees about
	 * potentially being migrated away.
	 */
	if (unlikely(cpu < 0))
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
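
/*
 * Illustrative usage sketch (not part of this driver): a consumer such as a
 * DPNI driver typically asks for the service affine to the cpu its datapath
 * runs on, and falls back to "any cpu" if that cpu has no DPIO portal. The
 * "cpu" variable and "priv" structure below are hypothetical:
 *
 *	struct dpaa2_io *io = dpaa2_io_service_select(cpu);
 *
 *	if (!io)
 *		io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
 *	if (!io)
 *		return -ENODEV;
 *	priv->io = io;
 */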

/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 *     (a) The DPIO service is "ready" to handle a notification arrival
 *         (which might happen before the "attach" command to MC has
 *         returned control of execution back to the caller)
 *     (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *         'qman64' parameters that it should pass along in the MC command
 *         in order for the object to be configured to produce the right
 *         notification fields to the DPIO service.
 *
 * Return 0 for success, or -ENODEV for failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

/**
 * dpaa2_io_service_deregister() - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
 * this function achieves.
 *
 * Return 0 for success.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
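
/*
 * Illustrative sketch of the notification flow described above (hypothetical
 * consumer code, not part of this driver): the caller fills a notification
 * context, registers it, passes ctx->dpio_id/ctx->qman64 to MC in its own
 * "attach" command, and re-arms the source from its callback once drained.
 * The names "my_fqdan_cb", "my_attach_to_mc" and "priv" are assumptions.
 *
 *	static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
 *	{
 *		(schedule NAPI or a workqueue, pull-dequeue the FQ, then:)
 *		dpaa2_io_service_rearm(NULL, ctx);
 *	}
 *
 *	priv->ctx.cb = my_fqdan_cb;
 *	priv->ctx.is_cdan = 0;
 *	priv->ctx.id = fqid;
 *	priv->ctx.desired_cpu = DPAA2_IO_ANY_CPU;
 *	err = dpaa2_io_service_register(NULL, &priv->ctx);
 *	if (err)
 *		return err;
 *	err = my_attach_to_mc(priv, priv->ctx.dpio_id, priv->ctx.qman64);
 */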

/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;
	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
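
/*
 * Illustrative sketch (hypothetical consumer code): issuing a pull dequeue
 * into a previously created store. The underlying qbman_swp_pull() can return
 * -EBUSY while a previous volatile dequeue command is still in flight, so
 * callers commonly retry. The store "s" is assumed to have been obtained from
 * dpaa2_io_store_create() further down in this file.
 *
 *	do {
 *		err = dpaa2_io_service_pull_fq(NULL, fqid, s);
 *	} while (err == -EBUSY);
 *	if (err)
 *		return err;
 *	(then drain the results with dpaa2_io_store_next(), see the sketch
 *	 after dpaa2_io_store_next() below)
 */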

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
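
/*
 * Illustrative sketch (hypothetical consumer code): because the enqueue ring
 * can be temporarily full, callers typically retry a bounded number of times
 * on -EBUSY before dropping the frame. "fd" is a frame descriptor already
 * built by the caller; the retry bound of 10 is an arbitrary example value.
 *
 *	for (retries = 0; retries < 10; retries++) {
 *		err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);
 *		if (err != -EBUSY)
 *			break;
 *	}
 *	if (err)
 *		(free the underlying buffer and count the frame as dropped)
 */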

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u32 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u32 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
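
/*
 * Illustrative sketch (hypothetical consumer code): seeding a buffer pool
 * with DMA-mapped buffer addresses and draining it again on teardown. The
 * release path can also report -EBUSY when the release ring is full, so a
 * retry loop is common. "addrs" is a small caller-owned array of buffer
 * addresses; the per-command batch size is limited by the hardware (commonly
 * 7 buffers per release/acquire command, an assumption to verify against the
 * QBMan documentation).
 *
 *	do {
 *		err = dpaa2_io_service_release(NULL, bpid, addrs, count);
 *	} while (err == -EBUSY);
 *
 *	(on teardown, acquire buffers back until the pool is empty)
 *	do {
 *		n = dpaa2_io_service_acquire(NULL, bpid, addrs, ARRAY_SIZE(addrs));
 *		if (n > 0)
 *			(unmap and free the n buffers returned)
 *	} while (n > 0);
 */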

/*
 * 'Stores' are reusable memory blocks for holding dequeue results and for
 * assisting with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeued frame results, must be <= 16.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 16))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            result.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting it to
		 * check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
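
/*
 * Illustrative sketch (hypothetical consumer code): a typical life cycle of
 * a store used with the pull-dequeue calls above. The loop below busy-waits
 * on dpaa2_io_store_next() until is_last is seen; real drivers usually bound
 * this wait or run it from NAPI context. dpaa2_dq_fd() is one of the result
 * accessors from soc/fsl/dpaa2-global.h.
 *
 *	struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);
 *	struct dpaa2_dq *dq;
 *	int is_last = 0;
 *
 *	if (!s)
 *		return -ENOMEM;
 *	(issue dpaa2_io_service_pull_fq()/pull_channel() as shown earlier)
 *	do {
 *		dq = dpaa2_io_store_next(s, &is_last);
 *		if (dq)
 *			(process dpaa2_dq_fd(dq))
 *	} while (!is_last);
 *	dpaa2_io_store_destroy(s);
 */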

/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame- and byte-count are hereby returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 *                             buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
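
/*
 * Illustrative sketch (hypothetical debug code): the two query helpers above
 * are handy when instrumenting congestion or buffer-leak problems. "fqid" and
 * "bpid" are whatever identifiers the consumer driver obtained from MC.
 *
 *	u32 fcnt, bcnt, nbufs;
 *
 *	if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
 *		pr_debug("fq %u: %u frames, %u bytes\n", fqid, fcnt, bcnt);
 *	if (!dpaa2_io_query_bp_count(NULL, bpid, &nbufs))
 *		pr_debug("bp %u: %u free buffers\n", bpid, nbufs);
 */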