/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < (kdev)->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below.
 * Newest followed by older ones. Search is done from start of the array
 * until a firmware file is found.
 */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:		qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		atomic_inc(&qh->stats.notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	unsigned long cpu_map;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		!(inst->range->flags &
			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}
static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret) {
			devm_kfree(inst->kdev->dev, qh);
			return ERR_PTR(ret);
		}
	}

	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}
static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *inst,
			       bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	rcu_read_lock();	/* the handle list is RCU protected */
	for_each_handle_rcu(qh, inst) {
		seq_printf(s, "\t\thandle %p: ", qh);
		seq_printf(s, "pushes %8d, ",
			   atomic_read(&qh->stats.pushes));
		seq_printf(s, "pops %8d, ",
			   atomic_read(&qh->stats.pops));
		seq_printf(s, "count %8d, ",
			   knav_queue_get_count(qh));
		seq_printf(s, "notifies %8d, ",
			   atomic_read(&qh->stats.notifies));
		seq_printf(s, "push errors %8d, ",
			   atomic_read(&qh->stats.push_errors));
		seq_printf(s, "pop errors %8d\n",
			   atomic_read(&qh->stats.pop_errors));
	}
	rcu_read_unlock();
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
	.open		= knav_queue_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
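
/*
 * Example notifier usage (an illustrative sketch, not part of this
 * driver): clients normally attach a callback through
 * knav_queue_device_control() with KNAV_QUEUE_SET_NOTIFIER, which lands
 * in knav_queue_set_notifier() above. The callback and its argument are
 * hypothetical client-side names.
 *
 *	static void my_rx_notify(void *arg)
 *	{
 *		struct my_priv *priv = arg;	// hypothetical client state
 *		// typically: disable notifications, schedule NAPI/work
 *	}
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_rx_notify,
 *		.fn_arg	= priv,
 *	};
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */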
static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open()	- open a hardware queue
 * @name		- name to give the queue handle
 * @id			- desired queue number if any or specifies the type
 *			  of queue
 * @flags		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
					unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);

/**
 * knav_queue_close()	- close a hardware queue handle
 * @qh			- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qh				- queue handle
 * @cmd				- control commands
 * @arg				- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
				unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
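
/*
 * Example open/close usage (an illustrative sketch; the queue name is
 * hypothetical):
 *
 *	void *qh;
 *	int id;
 *
 *	qh = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	id = knav_queue_device_control(qh, KNAV_QUEUE_GET_ID, 0);
 *	...
 *	knav_queue_close(qh);
 */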
/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qh			- hardware queue handle
 * @dma			- DMA address of the data to push
 * @size		- size of data to push
 * @flags		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
					unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	atomic_inc(&qh->stats.pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qh			- hardware queue handle
 * @size		- (optional) size of the data popped
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx  = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	atomic_inc(&qh->stats.pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
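
/*
 * Worked example of the queue register word encoding used by
 * knav_queue_push()/knav_queue_pop() above (illustrative): the low bits
 * of the word carry the descriptor size in 16-byte units, minus one.
 * Pushing a 64-byte descriptor at DMA address 0x23456780 writes
 * 0x23456780 | ((64 / 16) - 1) == 0x23456783; on pop, the size is
 * recovered as ((val & DESC_SIZE_MASK) + 1) * 16 == 64. This is why
 * descriptor addresses are expected to be at least 16-byte aligned.
 */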
/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;
	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;
	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name		- name to give the pool handle
 * @num_desc		- numbers of descriptors in the pool
 * @region_id		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
					int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);

/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @pool		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);
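
/*
 * Example pool lifetime (an illustrative sketch; the pool name, size and
 * region id are hypothetical and must match a configured descriptor
 * region):
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("rx-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return -ENODEV;	// or propagate PTR_ERR(pool)
 *	...
 *	knav_pool_destroy(pool);
 */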
/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @pool		- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @pool		- pool handle
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);

/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @pool		- pool handle
 * @desc		- address of descriptor to map
 * @size		- size of descriptor to map
 * @dma			- DMA address return pointer
 * @dma_sz		- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
					dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;
	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @pool		- pool handle
 * @dma			- DMA address of descriptor to unmap
 * @dma_sz		- size of descriptor to unmap
 *
 * Returns descriptor address on success. Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);

/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @pool		- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;
	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);
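
/*
 * Typical descriptor round trip using the helpers above (an illustrative
 * sketch; "pool", "txq", "compq" and the descriptor layout are
 * hypothetical):
 *
 *	desc = knav_pool_desc_get(pool);
 *	// ... fill in descriptor fields ...
 *	knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	knav_queue_push(txq, dma, dma_sz, 0);
 *
 *	// later, e.g. on a completion queue notification:
 *	dma = knav_queue_pop(compq, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 *	// ... process, then return the descriptor ...
 *	knav_pool_desc_put(pool, desc);
 */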
static void knav_queue_setup_region(struct knav_device *kdev,
					struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
				DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}
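
/*
 * Worked example of the region register encoding above (illustrative):
 * for a region of 4096 descriptors of 128 bytes each, hw_num_desc is
 * ilog2(4095) + 1 == 12 and the programmed value is 12 - 5 == 7, since
 * the hardware counts descriptors in powers of two starting at 2^5 == 32
 * (which is also why regions smaller than 32 descriptors are rejected).
 * hw_desc_size is (128 / 16) - 1 == 7, placed in the upper half-word of
 * the size_count register.
 */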
static int knav_queue_setup_regions(struct knav_device *kdev,
					struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
				       const char *name,
				       struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40 bits in hardware, we treat them
	 * as 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
						  8 * block->size, &block->dma,
						  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma,
			       &qmgr->reg_config->link_ram_base0);
		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}
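
/*
 * Illustrative DT fragment for the link RAM properties consumed above
 * (example values only, not authoritative; each property is
 * <base-address num-entries>, and a zero base asks the driver to
 * allocate the block itself):
 *
 *	linkram0 = <0x00100000 0x8000>;	// internal link ram, pre-assigned
 *	linkram1 = <0x0 0x10000>;	// base 0 => 8 * 0x10000 bytes allocated here
 */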
static int knav_setup_queue_range(struct knav_device *kdev,
					struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
			range->irqs[i].cpu_map =
				(oirq.args[2] & 0x0000ff00) >> 8;
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
					struct device_node *queue_pools)
{
	struct device_node *type, *range;
	int ret;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			ret = knav_setup_queue_range(kdev, range);
			/* return value ignored, we init the rest... */
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}
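
/*
 * Illustrative "queue-pools" DT fragment matching the parsing above
 * (names and numbers are examples only): ranges are grouped by type, and
 * each range node carries a "qrange" plus optional "interrupts",
 * "qalloc-by-id" and "accumulator" properties:
 *
 *	queue-pools {
 *		qpend {
 *			qpend-0 {
 *				qrange = <658 8>;
 *				interrupts = <0 40 0xf04>;
 *			};
 *		};
 *		general-purpose {
 *			gp-0 {
 *				qrange = <96 72>;
 *			};
 *		};
 *	};
 */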
static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
			node->name, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
			index, node->name);
	return regs;
}
static int knav_queue_init_qmgrs(struct knav_device *kdev,
					struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);
		qmgr->reg_status =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_STATUS_REG_INDEX);
		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_REGION_REG_INDEX);
		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PUSH_REG_INDEX);
		qmgr->reg_pop =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_POP_REG_INDEX);

		if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_status))
				devm_iounmap(dev, qmgr->reg_status);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			if (!IS_ERR(qmgr->reg_pop))
				devm_iounmap(dev, qmgr->reg_pop);
			devm_kfree(dev, qmgr);
			continue;
		}

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
					struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
			  struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
					PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}
static int knav_queue_load_pdsp(struct knav_device *kdev,
			  struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = request_firmware_direct(&fw,
						      knav_acc_firmwares[i],
						      kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
			   struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val  = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}
static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp is not
	 * loaded, as acc channels are optional depending on firmware
	 * availability in the system. We set the loaded and started
	 * flags and, when initializing the acc range, check them and
	 * init the range only if the pdsp is started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
					struct knav_range_info *range,
					struct knav_queue_inst *inst,
					unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
					knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}
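
/*
 * Worked example of the instance indexing above (sizes illustrative):
 * if sizeof(struct knav_queue_inst) were 176 bytes, order_base_2(176)
 * yields inst_shift == 8, so every instance occupies a 256-byte slot and
 * knav_queue_idx_to_inst() reduces to a shift and an add rather than a
 * multiply - a little padding traded for faster queue lookups.
 */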
static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id    = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			goto err;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			goto err;
	}
	of_node_put(pdsps);

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_ops);
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
	{ .compatible = "ti,keystone-navigator-qmss", },
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove		= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.of_match_table	= keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");