/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
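/*
 * For example (the device instance suffix is platform specific):
 *   echo 3000 > /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */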
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

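/* A non-zero module parameter overrides the descriptor count from ACPI/DT. */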
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	return 0;
}

static void hidma_issue_task(struct tasklet_struct *t)
{
	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

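	/*
	 * If the asynchronous resume request fails, let the tasklet resume
	 * the device synchronously and start the channel from there.
	 */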
	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

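/*
 * A cookie is reported successful unless it falls in the (last_success,
 * last_used] window, i.e. among the transactions issued after the last
 * known good completion; the comparison handles cookie wraparound.
 */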
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;
	u64 byte_pattern, fill_pattern;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

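	/*
	 * Replicate the memset byte across all eight bytes of the 64-bit
	 * fill pattern handed to the low-level driver.
	 */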
	byte_pattern = (char)value;
	fill_pattern =	(byte_pattern << 56) |
			(byte_pattern << 48) |
			(byte_pattern << 40) |
			(byte_pattern << 32) |
			(byte_pattern << 24) |
			(byte_pattern << 16) |
			(byte_pattern << 8) |
			byte_pattern;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     fill_pattern, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct hidma_dev *mdev = dev_get_drvdata(dev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

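	/*
	 * Only the first MSI vector programs the device's address/data
	 * registers.
	 */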
	if (!desc->msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct device *dev = dmadev->ddev.dev;
	int i, virq;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(dev, i);
		if (virq)
			devm_free_irq(dev, virq, &dmadev->lldev);
	}

	platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	int rc, i, virq;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(&pdev->dev, i);
		rc = devm_request_irq(&pdev->dev, virq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc)
			break;
		if (!i)
			dmadev->msi_virqbase = virq;
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for (--i; i >= 0; i--) {
			virq = msi_get_virq(&pdev->dev, i);
			devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
		}
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	return rc;
#else
	return -EINVAL;
#endif
}

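/*
 * The ACPI/OF match data carries a bitmask of hidma_cap flags; a device
 * entry without match data advertises no optional capabilities.
 */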
static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (enum hidma_cap) device_get_match_data(dev);
	return cap ? ((cap & test_cap) > 0) : 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
	if (IS_ERR(trca)) {
		rc = PTR_ERR(trca);
		goto bailout;
	}

	evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
	if (IS_ERR(evca)) {
		rc = PTR_ERR(evca);
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = chirq;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

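	/*
	 * Read the channel index; identity-capable hardware exposes it at a
	 * different register offset.
	 */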
	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

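	/*
	 * Fall back to the wired channel interrupt if the platform has no
	 * MSI support or MSI setup failed.
	 */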
	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_setup(&dmadev->task, hidma_issue_task);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
	{.compatible = "qcom,hidma-1.2",
	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");