/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < (kdev)->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below.
 * Newest followed by older ones. Search is done from start of the array
 * until a firmware file is found.
 */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:		qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	unsigned long cpu_map;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		   (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		   !(inst->range->flags &
		     (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}
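
/*
 * Queue ids passed in by clients are global (relative to the device tree
 * "queue-range" property), while instances are stored base_id-relative.
 * A purely illustrative example, assuming a hypothetical base_id of 8192:
 *
 *	knav_queue_find_by_id(8200) returns the instance with inst->id == 8,
 *	and knav_queue_device_control(qh, KNAV_QUEUE_GET_ID, 0) on a handle
 *	for that instance reports 8200 again.
 *
 * This is only a sketch of the translation done above and in
 * knav_queue_device_control(); the real base_id comes from the DT.
 */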

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->stats = alloc_percpu(struct knav_queue_stats);
	if (!qh->stats) {
		ret = -ENOMEM;
		goto err;
	}

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret)
			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;

err:
	if (qh->stats)
		free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
	return ERR_PTR(ret);
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						  unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}
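
/*
 * A notifier is normally installed through knav_queue_device_control()
 * rather than by calling the enable/disable helpers above directly.
 * A minimal sketch of a hypothetical consumer (the callback name and
 * private data are illustrative only):
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_rx_notify,
 *		.fn_arg	= my_priv,
 *	};
 *	knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *				  (unsigned long)&cfg);
 *
 * my_rx_notify() is then invoked from knav_queue_notify(); passing the same
 * command with cfg.fn == NULL tears the notifier down again.
 */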

static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			      struct knav_queue_inst *inst,
			      bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					   struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
	int cpu = 0;
	int pushes = 0;
	int pops = 0;
	int push_errors = 0;
	int pop_errors = 0;
	int notifies = 0;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		for_each_possible_cpu(cpu) {
			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
			pops += per_cpu_ptr(qh->stats, cpu)->pops;
			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
		}

		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
			   qh,
			   pushes,
			   pops,
			   knav_queue_get_count(qh),
			   notifies,
			   push_errors,
			   pop_errors);
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
	.open		= knav_queue_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
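
/*
 * knav_queue_pdsp_wait() below polls a PDSP register until the bits selected
 * by @flags (or the whole register when @flags is 0) read back as zero, or
 * until @timeout milliseconds elapse. For example, stopping a PDSP later in
 * this file waits for PDSP_CTRL_RUNNING to clear:
 *
 *	knav_queue_pdsp_wait(&pdsp->regs->control, timeout, PDSP_CTRL_RUNNING);
 *
 * while starting one waits for the command register itself to drain:
 *
 *	knav_queue_pdsp_wait(pdsp->command, timeout, 0);
 */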

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
				       u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open()	- open a hardware queue
 * @name		- name to give the queue handle
 * @id			- desired queue number if any or specifies the type
 *			  of queue
 * @flags		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
		      unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);

/**
 * knav_queue_close()	- close a hardware queue handle
 * @qh			- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);
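
/*
 * Typical open/close lifecycle as seen from a client driver. This is only a
 * sketch; "my-rx-queue" and the chosen type are made-up values:
 *
 *	void *qh;
 *
 *	qh = knav_queue_open("my-rx-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	...push/pop descriptors, optionally install a notifier...
 *	knav_queue_close(qh);
 *
 * Passing a specific queue number instead of KNAV_QUEUE_GP/QPEND/ACC opens
 * exactly that hardware queue via knav_queue_open_by_id().
 */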

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qh				- queue handle
 * @cmd				- control commands
 * @arg				- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);



/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qh			- hardware queue handle
 * @dma			- DMA address of the data (or descriptor) to push
 * @size		- size of data to push
 * @flags		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
		    unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	this_cpu_inc(qh->stats->pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);
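
/*
 * The word written to the push register packs the (16-byte aligned)
 * descriptor DMA address with the descriptor size encoded as (size / 16) - 1;
 * the pop path reverses this with DESC_PTR_MASK and DESC_SIZE_MASK. A worked
 * example, purely for illustration and assuming the usual low-nibble size
 * encoding: pushing a 64-byte descriptor at DMA address 0x23a00040 writes
 *
 *	0x23a00040 | ((64 / 16) - 1) == 0x23a00043
 *
 * and popping it back yields dma = 0x23a00040 and *size = (3 + 1) * 16 = 64.
 */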

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qh			- hardware queue handle
 * @size		- (optional) size of the data popped
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	this_cpu_inc(qh->stats->pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;

		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;

	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;

	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
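
/*
 * Descriptors in a pool live in one physically contiguous region, so the
 * virt<->dma helpers above are plain offset arithmetic and a round trip is
 * the identity. With a hypothetical region at virt_start 0xc0000000 and
 * dma_start 0x23a00000, a descriptor at virtual address 0xc0000100 maps to
 * DMA address 0x23a00100 and back again.
 */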

/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name		- name to give the pool handle
 * @num_desc		- numbers of descriptors in the pool
 * @region_id		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools.
	 * Use the first free slot which is large enough to accommodate
	 * the request.
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);

/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @pool		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);
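
/*
 * Putting the pool API together, a hypothetical client allocating descriptors
 * for a DMA channel might do something like the sketch below (names, sizes
 * and the region id are illustrative only):
 *
 *	void *pool, *desc;
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	pool = knav_pool_create("my-tx-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *	desc = knav_pool_desc_get(pool);
 *	...fill the descriptor...
 *	knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
 *	...hand dma to the hardware, later knav_pool_desc_unmap() it...
 *	knav_pool_desc_put(pool, desc);
 *	knav_pool_destroy(pool);
 */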

/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @pool		- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @pool		- pool handle
 * @desc		- descriptor to return to the pool
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;

	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);

/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @pool		- pool handle
 * @desc		- address of descriptor to map
 * @size		- size of descriptor to map
 * @dma			- DMA address return pointer
 * @dma_sz		- adjusted size return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
		       dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;

	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @pool		- pool handle
 * @dma			- DMA address of descriptor to unmap
 * @dma_sz		- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);

/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @pool		- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;

	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);
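
/*
 * knav_queue_setup_region() below programs each region into the queue
 * managers using the hardware's compact encoding: the descriptor count is
 * stored as a power-of-two exponent offset from 32 (hw_num_desc =
 * ilog2(n - 1) + 1, then minus 5) and the descriptor size in 16-byte units
 * minus one. As a worked example, a hypothetical region of 1024 descriptors
 * of 64 bytes each is programmed as hw_num_desc = 10 - 5 = 5 and
 * hw_desc_size = 64 / 16 - 1 = 3.
 */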

static void knav_queue_setup_region(struct knav_device *kdev,
				    struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
					       GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
			       DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
				    struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
			     const char *name,
			     struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40 bits in hardware, we treat them
	 * as 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
							  8 * block->size,
							  &block->dma,
							  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
			range->irqs[i].cpu_map =
				(oirq.args[2] & 0x0000ff00) >> 8;
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
				  struct device_node *queue_pools)
{
	struct device_node *type, *range;
	int ret;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			ret = knav_setup_queue_range(kdev, range);
			/* return value ignored, we init the rest... */
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
			node->name, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
			index, node->name);
	return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
				 struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);
		qmgr->reg_status =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_STATUS_REG_INDEX);
		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_REGION_REG_INDEX);
		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PUSH_REG_INDEX);
		qmgr->reg_pop =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_POP_REG_INDEX);

		if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_status))
				devm_iounmap(dev, qmgr->reg_status);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			if (!IS_ERR(qmgr->reg_pop))
				devm_iounmap(dev, qmgr->reg_pop);
			devm_kfree(dev, qmgr);
			continue;
		}

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
				 struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = request_firmware_direct(&fw,
						      knav_acc_firmwares[i],
						      kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp is not loaded,
	 * since acc channels are optional and depend on firmware availability
	 * in the system. We set the loaded and started flags and, when
	 * initializing the acc range, init the range only if the pdsp is
	 * started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
					knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			goto err;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			goto err;
	}
	of_node_put(pdsps);

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_ops);
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

/* Match table for of_platform binding */
static struct of_device_id keystone_qmss_of_match[] = {
	{ .compatible = "ti,keystone-navigator-qmss", },
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove		= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");