1 /* 2 * Qualcomm Technologies HIDMA DMA engine interface 3 * 4 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 and 8 * only version 2 as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 */ 15 16 /* 17 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. 18 * Copyright (C) Semihalf 2009 19 * Copyright (C) Ilya Yanok, Emcraft Systems 2010 20 * Copyright (C) Alexander Popov, Promcontroller 2014 21 * 22 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description 23 * (defines, structures and comments) was taken from MPC5121 DMA driver 24 * written by Hongjun Chen <hong-jun.chen@freescale.com>. 25 * 26 * Approved as OSADL project by a majority of OSADL members and funded 27 * by OSADL membership fees in 2009; for details see www.osadl.org. 28 * 29 * This program is free software; you can redistribute it and/or modify it 30 * under the terms of the GNU General Public License as published by the Free 31 * Software Foundation; either version 2 of the License, or (at your option) 32 * any later version. 33 * 34 * This program is distributed in the hope that it will be useful, but WITHOUT 35 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 36 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 37 * more details. 38 * 39 * The full GNU General Public License is included in this distribution in the 40 * file called COPYING. 41 */ 42 43 /* Linux Foundation elects GPLv2 license only. */ 44 45 #include <linux/dmaengine.h> 46 #include <linux/dma-mapping.h> 47 #include <linux/list.h> 48 #include <linux/module.h> 49 #include <linux/platform_device.h> 50 #include <linux/slab.h> 51 #include <linux/spinlock.h> 52 #include <linux/of_dma.h> 53 #include <linux/property.h> 54 #include <linux/delay.h> 55 #include <linux/acpi.h> 56 #include <linux/irq.h> 57 #include <linux/atomic.h> 58 #include <linux/pm_runtime.h> 59 60 #include "../dmaengine.h" 61 #include "hidma.h" 62 63 /* 64 * Default idle time is 2 seconds. This parameter can 65 * be overridden by changing the following 66 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms 67 * during kernel boot. 68 */ 69 #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 70 #define HIDMA_ERR_INFO_SW 0xFF 71 #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 72 #define HIDMA_NR_DEFAULT_DESC 10 73 74 static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) 75 { 76 return container_of(dmadev, struct hidma_dev, ddev); 77 } 78 79 static inline 80 struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp) 81 { 82 return container_of(_lldevp, struct hidma_dev, lldev); 83 } 84 85 static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) 86 { 87 return container_of(dmach, struct hidma_chan, chan); 88 } 89 90 static inline 91 struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) 92 { 93 return container_of(t, struct hidma_desc, desc); 94 } 95 96 static void hidma_free(struct hidma_dev *dmadev) 97 { 98 INIT_LIST_HEAD(&dmadev->ddev.channels); 99 } 100 101 static unsigned int nr_desc_prm; 102 module_param(nr_desc_prm, uint, 0644); 103 MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); 104 105 106 /* process completed descriptors */ 107 static void hidma_process_completed(struct hidma_chan *mchan) 108 { 109 struct dma_device *ddev = mchan->chan.device; 110 struct hidma_dev *mdma = to_hidma_dev(ddev); 111 struct dma_async_tx_descriptor *desc; 112 dma_cookie_t last_cookie; 113 struct hidma_desc *mdesc; 114 struct hidma_desc *next; 115 unsigned long irqflags; 116 struct list_head list; 117 118 INIT_LIST_HEAD(&list); 119 120 /* Get all completed descriptors */ 121 spin_lock_irqsave(&mchan->lock, irqflags); 122 list_splice_tail_init(&mchan->completed, &list); 123 spin_unlock_irqrestore(&mchan->lock, irqflags); 124 125 /* Execute callbacks and run dependencies */ 126 list_for_each_entry_safe(mdesc, next, &list, node) { 127 enum dma_status llstat; 128 struct dmaengine_desc_callback cb; 129 struct dmaengine_result result; 130 131 desc = &mdesc->desc; 132 last_cookie = desc->cookie; 133 134 spin_lock_irqsave(&mchan->lock, irqflags); 135 dma_cookie_complete(desc); 136 spin_unlock_irqrestore(&mchan->lock, irqflags); 137 138 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); 139 dmaengine_desc_get_callback(desc, &cb); 140 141 dma_run_dependencies(desc); 142 143 spin_lock_irqsave(&mchan->lock, irqflags); 144 list_move(&mdesc->node, &mchan->free); 145 146 if (llstat == DMA_COMPLETE) { 147 mchan->last_success = last_cookie; 148 result.result = DMA_TRANS_NOERROR; 149 } else 150 result.result = DMA_TRANS_ABORTED; 151 152 spin_unlock_irqrestore(&mchan->lock, irqflags); 153 154 dmaengine_desc_callback_invoke(&cb, &result); 155 } 156 } 157 158 /* 159 * Called once for each submitted descriptor. 160 * PM is locked once for each descriptor that is currently 161 * in execution. 162 */ 163 static void hidma_callback(void *data) 164 { 165 struct hidma_desc *mdesc = data; 166 struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan); 167 struct dma_device *ddev = mchan->chan.device; 168 struct hidma_dev *dmadev = to_hidma_dev(ddev); 169 unsigned long irqflags; 170 bool queued = false; 171 172 spin_lock_irqsave(&mchan->lock, irqflags); 173 if (mdesc->node.next) { 174 /* Delete from the active list, add to completed list */ 175 list_move_tail(&mdesc->node, &mchan->completed); 176 queued = true; 177 178 /* calculate the next running descriptor */ 179 mchan->running = list_first_entry(&mchan->active, 180 struct hidma_desc, node); 181 } 182 spin_unlock_irqrestore(&mchan->lock, irqflags); 183 184 hidma_process_completed(mchan); 185 186 if (queued) { 187 pm_runtime_mark_last_busy(dmadev->ddev.dev); 188 pm_runtime_put_autosuspend(dmadev->ddev.dev); 189 } 190 } 191 192 static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) 193 { 194 struct hidma_chan *mchan; 195 struct dma_device *ddev; 196 197 mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL); 198 if (!mchan) 199 return -ENOMEM; 200 201 ddev = &dmadev->ddev; 202 mchan->dma_sig = dma_sig; 203 mchan->dmadev = dmadev; 204 mchan->chan.device = ddev; 205 dma_cookie_init(&mchan->chan); 206 207 INIT_LIST_HEAD(&mchan->free); 208 INIT_LIST_HEAD(&mchan->prepared); 209 INIT_LIST_HEAD(&mchan->active); 210 INIT_LIST_HEAD(&mchan->completed); 211 212 spin_lock_init(&mchan->lock); 213 list_add_tail(&mchan->chan.device_node, &ddev->channels); 214 dmadev->ddev.chancnt++; 215 return 0; 216 } 217 218 static void hidma_issue_task(unsigned long arg) 219 { 220 struct hidma_dev *dmadev = (struct hidma_dev *)arg; 221 222 pm_runtime_get_sync(dmadev->ddev.dev); 223 hidma_ll_start(dmadev->lldev); 224 } 225 226 static void hidma_issue_pending(struct dma_chan *dmach) 227 { 228 struct hidma_chan *mchan = to_hidma_chan(dmach); 229 struct hidma_dev *dmadev = mchan->dmadev; 230 unsigned long flags; 231 int status; 232 233 spin_lock_irqsave(&mchan->lock, flags); 234 if (!mchan->running) { 235 struct hidma_desc *desc = list_first_entry(&mchan->active, 236 struct hidma_desc, 237 node); 238 mchan->running = desc; 239 } 240 spin_unlock_irqrestore(&mchan->lock, flags); 241 242 /* PM will be released in hidma_callback function. */ 243 status = pm_runtime_get(dmadev->ddev.dev); 244 if (status < 0) 245 tasklet_schedule(&dmadev->task); 246 else 247 hidma_ll_start(dmadev->lldev); 248 } 249 250 static inline bool hidma_txn_is_success(dma_cookie_t cookie, 251 dma_cookie_t last_success, dma_cookie_t last_used) 252 { 253 if (last_success <= last_used) { 254 if ((cookie <= last_success) || (cookie > last_used)) 255 return true; 256 } else { 257 if ((cookie <= last_success) && (cookie > last_used)) 258 return true; 259 } 260 return false; 261 } 262 263 static enum dma_status hidma_tx_status(struct dma_chan *dmach, 264 dma_cookie_t cookie, 265 struct dma_tx_state *txstate) 266 { 267 struct hidma_chan *mchan = to_hidma_chan(dmach); 268 enum dma_status ret; 269 270 ret = dma_cookie_status(dmach, cookie, txstate); 271 if (ret == DMA_COMPLETE) { 272 bool is_success; 273 274 is_success = hidma_txn_is_success(cookie, mchan->last_success, 275 dmach->cookie); 276 return is_success ? ret : DMA_ERROR; 277 } 278 279 if (mchan->paused && (ret == DMA_IN_PROGRESS)) { 280 unsigned long flags; 281 dma_cookie_t runcookie; 282 283 spin_lock_irqsave(&mchan->lock, flags); 284 if (mchan->running) 285 runcookie = mchan->running->desc.cookie; 286 else 287 runcookie = -EINVAL; 288 289 if (runcookie == cookie) 290 ret = DMA_PAUSED; 291 292 spin_unlock_irqrestore(&mchan->lock, flags); 293 } 294 295 return ret; 296 } 297 298 /* 299 * Submit descriptor to hardware. 300 * Lock the PM for each descriptor we are sending. 301 */ 302 static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) 303 { 304 struct hidma_chan *mchan = to_hidma_chan(txd->chan); 305 struct hidma_dev *dmadev = mchan->dmadev; 306 struct hidma_desc *mdesc; 307 unsigned long irqflags; 308 dma_cookie_t cookie; 309 310 pm_runtime_get_sync(dmadev->ddev.dev); 311 if (!hidma_ll_isenabled(dmadev->lldev)) { 312 pm_runtime_mark_last_busy(dmadev->ddev.dev); 313 pm_runtime_put_autosuspend(dmadev->ddev.dev); 314 return -ENODEV; 315 } 316 317 mdesc = container_of(txd, struct hidma_desc, desc); 318 spin_lock_irqsave(&mchan->lock, irqflags); 319 320 /* Move descriptor to active */ 321 list_move_tail(&mdesc->node, &mchan->active); 322 323 /* Update cookie */ 324 cookie = dma_cookie_assign(txd); 325 326 hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch); 327 spin_unlock_irqrestore(&mchan->lock, irqflags); 328 329 return cookie; 330 } 331 332 static int hidma_alloc_chan_resources(struct dma_chan *dmach) 333 { 334 struct hidma_chan *mchan = to_hidma_chan(dmach); 335 struct hidma_dev *dmadev = mchan->dmadev; 336 struct hidma_desc *mdesc, *tmp; 337 unsigned long irqflags; 338 LIST_HEAD(descs); 339 unsigned int i; 340 int rc = 0; 341 342 if (mchan->allocated) 343 return 0; 344 345 /* Alloc descriptors for this channel */ 346 for (i = 0; i < dmadev->nr_descriptors; i++) { 347 mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT); 348 if (!mdesc) { 349 rc = -ENOMEM; 350 break; 351 } 352 dma_async_tx_descriptor_init(&mdesc->desc, dmach); 353 mdesc->desc.tx_submit = hidma_tx_submit; 354 355 rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig, 356 "DMA engine", hidma_callback, mdesc, 357 &mdesc->tre_ch); 358 if (rc) { 359 dev_err(dmach->device->dev, 360 "channel alloc failed at %u\n", i); 361 kfree(mdesc); 362 break; 363 } 364 list_add_tail(&mdesc->node, &descs); 365 } 366 367 if (rc) { 368 /* return the allocated descriptors */ 369 list_for_each_entry_safe(mdesc, tmp, &descs, node) { 370 hidma_ll_free(dmadev->lldev, mdesc->tre_ch); 371 kfree(mdesc); 372 } 373 return rc; 374 } 375 376 spin_lock_irqsave(&mchan->lock, irqflags); 377 list_splice_tail_init(&descs, &mchan->free); 378 mchan->allocated = true; 379 spin_unlock_irqrestore(&mchan->lock, irqflags); 380 return 1; 381 } 382 383 static struct dma_async_tx_descriptor * 384 hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, 385 size_t len, unsigned long flags) 386 { 387 struct hidma_chan *mchan = to_hidma_chan(dmach); 388 struct hidma_desc *mdesc = NULL; 389 struct hidma_dev *mdma = mchan->dmadev; 390 unsigned long irqflags; 391 392 /* Get free descriptor */ 393 spin_lock_irqsave(&mchan->lock, irqflags); 394 if (!list_empty(&mchan->free)) { 395 mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); 396 list_del(&mdesc->node); 397 } 398 spin_unlock_irqrestore(&mchan->lock, irqflags); 399 400 if (!mdesc) 401 return NULL; 402 403 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 404 src, dest, len, flags); 405 406 /* Place descriptor in prepared list */ 407 spin_lock_irqsave(&mchan->lock, irqflags); 408 list_add_tail(&mdesc->node, &mchan->prepared); 409 spin_unlock_irqrestore(&mchan->lock, irqflags); 410 411 return &mdesc->desc; 412 } 413 414 static int hidma_terminate_channel(struct dma_chan *chan) 415 { 416 struct hidma_chan *mchan = to_hidma_chan(chan); 417 struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); 418 struct hidma_desc *tmp, *mdesc; 419 unsigned long irqflags; 420 LIST_HEAD(list); 421 int rc; 422 423 pm_runtime_get_sync(dmadev->ddev.dev); 424 /* give completed requests a chance to finish */ 425 hidma_process_completed(mchan); 426 427 spin_lock_irqsave(&mchan->lock, irqflags); 428 mchan->last_success = 0; 429 list_splice_init(&mchan->active, &list); 430 list_splice_init(&mchan->prepared, &list); 431 list_splice_init(&mchan->completed, &list); 432 spin_unlock_irqrestore(&mchan->lock, irqflags); 433 434 /* this suspends the existing transfer */ 435 rc = hidma_ll_disable(dmadev->lldev); 436 if (rc) { 437 dev_err(dmadev->ddev.dev, "channel did not pause\n"); 438 goto out; 439 } 440 441 /* return all user requests */ 442 list_for_each_entry_safe(mdesc, tmp, &list, node) { 443 struct dma_async_tx_descriptor *txd = &mdesc->desc; 444 445 dma_descriptor_unmap(txd); 446 dmaengine_desc_get_callback_invoke(txd, NULL); 447 dma_run_dependencies(txd); 448 449 /* move myself to free_list */ 450 list_move(&mdesc->node, &mchan->free); 451 } 452 453 rc = hidma_ll_enable(dmadev->lldev); 454 out: 455 pm_runtime_mark_last_busy(dmadev->ddev.dev); 456 pm_runtime_put_autosuspend(dmadev->ddev.dev); 457 return rc; 458 } 459 460 static int hidma_terminate_all(struct dma_chan *chan) 461 { 462 struct hidma_chan *mchan = to_hidma_chan(chan); 463 struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); 464 int rc; 465 466 rc = hidma_terminate_channel(chan); 467 if (rc) 468 return rc; 469 470 /* reinitialize the hardware */ 471 pm_runtime_get_sync(dmadev->ddev.dev); 472 rc = hidma_ll_setup(dmadev->lldev); 473 pm_runtime_mark_last_busy(dmadev->ddev.dev); 474 pm_runtime_put_autosuspend(dmadev->ddev.dev); 475 return rc; 476 } 477 478 static void hidma_free_chan_resources(struct dma_chan *dmach) 479 { 480 struct hidma_chan *mchan = to_hidma_chan(dmach); 481 struct hidma_dev *mdma = mchan->dmadev; 482 struct hidma_desc *mdesc, *tmp; 483 unsigned long irqflags; 484 LIST_HEAD(descs); 485 486 /* terminate running transactions and free descriptors */ 487 hidma_terminate_channel(dmach); 488 489 spin_lock_irqsave(&mchan->lock, irqflags); 490 491 /* Move data */ 492 list_splice_tail_init(&mchan->free, &descs); 493 494 /* Free descriptors */ 495 list_for_each_entry_safe(mdesc, tmp, &descs, node) { 496 hidma_ll_free(mdma->lldev, mdesc->tre_ch); 497 list_del(&mdesc->node); 498 kfree(mdesc); 499 } 500 501 mchan->allocated = 0; 502 spin_unlock_irqrestore(&mchan->lock, irqflags); 503 } 504 505 static int hidma_pause(struct dma_chan *chan) 506 { 507 struct hidma_chan *mchan; 508 struct hidma_dev *dmadev; 509 510 mchan = to_hidma_chan(chan); 511 dmadev = to_hidma_dev(mchan->chan.device); 512 if (!mchan->paused) { 513 pm_runtime_get_sync(dmadev->ddev.dev); 514 if (hidma_ll_disable(dmadev->lldev)) 515 dev_warn(dmadev->ddev.dev, "channel did not stop\n"); 516 mchan->paused = true; 517 pm_runtime_mark_last_busy(dmadev->ddev.dev); 518 pm_runtime_put_autosuspend(dmadev->ddev.dev); 519 } 520 return 0; 521 } 522 523 static int hidma_resume(struct dma_chan *chan) 524 { 525 struct hidma_chan *mchan; 526 struct hidma_dev *dmadev; 527 int rc = 0; 528 529 mchan = to_hidma_chan(chan); 530 dmadev = to_hidma_dev(mchan->chan.device); 531 if (mchan->paused) { 532 pm_runtime_get_sync(dmadev->ddev.dev); 533 rc = hidma_ll_enable(dmadev->lldev); 534 if (!rc) 535 mchan->paused = false; 536 else 537 dev_err(dmadev->ddev.dev, 538 "failed to resume the channel"); 539 pm_runtime_mark_last_busy(dmadev->ddev.dev); 540 pm_runtime_put_autosuspend(dmadev->ddev.dev); 541 } 542 return rc; 543 } 544 545 static irqreturn_t hidma_chirq_handler(int chirq, void *arg) 546 { 547 struct hidma_lldev *lldev = arg; 548 549 /* 550 * All interrupts are request driven. 551 * HW doesn't send an interrupt by itself. 552 */ 553 return hidma_ll_inthandler(chirq, lldev); 554 } 555 556 static ssize_t hidma_show_values(struct device *dev, 557 struct device_attribute *attr, char *buf) 558 { 559 struct platform_device *pdev = to_platform_device(dev); 560 struct hidma_dev *mdev = platform_get_drvdata(pdev); 561 562 buf[0] = 0; 563 564 if (strcmp(attr->attr.name, "chid") == 0) 565 sprintf(buf, "%d\n", mdev->chidx); 566 567 return strlen(buf); 568 } 569 570 static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, 571 int mode) 572 { 573 struct device_attribute *attrs; 574 char *name_copy; 575 576 attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), 577 GFP_KERNEL); 578 if (!attrs) 579 return -ENOMEM; 580 581 name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); 582 if (!name_copy) 583 return -ENOMEM; 584 585 attrs->attr.name = name_copy; 586 attrs->attr.mode = mode; 587 attrs->show = hidma_show_values; 588 sysfs_attr_init(&attrs->attr); 589 590 return device_create_file(dev->ddev.dev, attrs); 591 } 592 593 static int hidma_probe(struct platform_device *pdev) 594 { 595 struct hidma_dev *dmadev; 596 struct resource *trca_resource; 597 struct resource *evca_resource; 598 int chirq; 599 void __iomem *evca; 600 void __iomem *trca; 601 int rc; 602 603 pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); 604 pm_runtime_use_autosuspend(&pdev->dev); 605 pm_runtime_set_active(&pdev->dev); 606 pm_runtime_enable(&pdev->dev); 607 608 trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); 609 trca = devm_ioremap_resource(&pdev->dev, trca_resource); 610 if (IS_ERR(trca)) { 611 rc = -ENOMEM; 612 goto bailout; 613 } 614 615 evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); 616 evca = devm_ioremap_resource(&pdev->dev, evca_resource); 617 if (IS_ERR(evca)) { 618 rc = -ENOMEM; 619 goto bailout; 620 } 621 622 /* 623 * This driver only handles the channel IRQs. 624 * Common IRQ is handled by the management driver. 625 */ 626 chirq = platform_get_irq(pdev, 0); 627 if (chirq < 0) { 628 rc = -ENODEV; 629 goto bailout; 630 } 631 632 dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); 633 if (!dmadev) { 634 rc = -ENOMEM; 635 goto bailout; 636 } 637 638 INIT_LIST_HEAD(&dmadev->ddev.channels); 639 spin_lock_init(&dmadev->lock); 640 dmadev->ddev.dev = &pdev->dev; 641 pm_runtime_get_sync(dmadev->ddev.dev); 642 643 dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); 644 if (WARN_ON(!pdev->dev.dma_mask)) { 645 rc = -ENXIO; 646 goto dmafree; 647 } 648 649 dmadev->dev_evca = evca; 650 dmadev->evca_resource = evca_resource; 651 dmadev->dev_trca = trca; 652 dmadev->trca_resource = trca_resource; 653 dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; 654 dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; 655 dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; 656 dmadev->ddev.device_tx_status = hidma_tx_status; 657 dmadev->ddev.device_issue_pending = hidma_issue_pending; 658 dmadev->ddev.device_pause = hidma_pause; 659 dmadev->ddev.device_resume = hidma_resume; 660 dmadev->ddev.device_terminate_all = hidma_terminate_all; 661 dmadev->ddev.copy_align = 8; 662 663 device_property_read_u32(&pdev->dev, "desc-count", 664 &dmadev->nr_descriptors); 665 666 if (!dmadev->nr_descriptors && nr_desc_prm) 667 dmadev->nr_descriptors = nr_desc_prm; 668 669 if (!dmadev->nr_descriptors) 670 dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; 671 672 dmadev->chidx = readl(dmadev->dev_trca + 0x28); 673 674 /* Set DMA mask to 64 bits. */ 675 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 676 if (rc) { 677 dev_warn(&pdev->dev, "unable to set coherent mask to 64"); 678 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 679 if (rc) 680 goto dmafree; 681 } 682 683 dmadev->lldev = hidma_ll_init(dmadev->ddev.dev, 684 dmadev->nr_descriptors, dmadev->dev_trca, 685 dmadev->dev_evca, dmadev->chidx); 686 if (!dmadev->lldev) { 687 rc = -EPROBE_DEFER; 688 goto dmafree; 689 } 690 691 rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0, 692 "qcom-hidma", dmadev->lldev); 693 if (rc) 694 goto uninit; 695 696 INIT_LIST_HEAD(&dmadev->ddev.channels); 697 rc = hidma_chan_init(dmadev, 0); 698 if (rc) 699 goto uninit; 700 701 rc = dma_async_device_register(&dmadev->ddev); 702 if (rc) 703 goto uninit; 704 705 dmadev->irq = chirq; 706 tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); 707 hidma_debug_init(dmadev); 708 hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO); 709 dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); 710 platform_set_drvdata(pdev, dmadev); 711 pm_runtime_mark_last_busy(dmadev->ddev.dev); 712 pm_runtime_put_autosuspend(dmadev->ddev.dev); 713 return 0; 714 715 uninit: 716 hidma_debug_uninit(dmadev); 717 hidma_ll_uninit(dmadev->lldev); 718 dmafree: 719 if (dmadev) 720 hidma_free(dmadev); 721 bailout: 722 pm_runtime_put_sync(&pdev->dev); 723 pm_runtime_disable(&pdev->dev); 724 return rc; 725 } 726 727 static int hidma_remove(struct platform_device *pdev) 728 { 729 struct hidma_dev *dmadev = platform_get_drvdata(pdev); 730 731 pm_runtime_get_sync(dmadev->ddev.dev); 732 dma_async_device_unregister(&dmadev->ddev); 733 devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); 734 tasklet_kill(&dmadev->task); 735 hidma_debug_uninit(dmadev); 736 hidma_ll_uninit(dmadev->lldev); 737 hidma_free(dmadev); 738 739 dev_info(&pdev->dev, "HI-DMA engine removed\n"); 740 pm_runtime_put_sync_suspend(&pdev->dev); 741 pm_runtime_disable(&pdev->dev); 742 743 return 0; 744 } 745 746 #if IS_ENABLED(CONFIG_ACPI) 747 static const struct acpi_device_id hidma_acpi_ids[] = { 748 {"QCOM8061"}, 749 {}, 750 }; 751 #endif 752 753 static const struct of_device_id hidma_match[] = { 754 {.compatible = "qcom,hidma-1.0",}, 755 {}, 756 }; 757 MODULE_DEVICE_TABLE(of, hidma_match); 758 759 static struct platform_driver hidma_driver = { 760 .probe = hidma_probe, 761 .remove = hidma_remove, 762 .driver = { 763 .name = "hidma", 764 .of_match_table = hidma_match, 765 .acpi_match_table = ACPI_PTR(hidma_acpi_ids), 766 }, 767 }; 768 769 module_platform_driver(hidma_driver); 770 MODULE_LICENSE("GPL v2"); 771