// SPDX-License-Identifier: GPL-2.0-only
/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 * Dan Williams <dan.j.williams@intel.com>
 *
 * with architecture considerations by:
 * Neil Brown <neilb@suse.de>
 * Jeff Garzik <jeff@garzik.org>
 */
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
	async_dmaengine_get();

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
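
/*
 * Illustrative usage (a sketch, not part of this file's code paths): async_*
 * frontends such as async_memcpy() typically look up a channel through the
 * async_tx_find_channel() wrapper before choosing between an offloaded and a
 * synchronous path, roughly:
 *
 *	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
 *						      &dest, 1, &src, 1, len);
 *	if (chan)
 *		... prepare a descriptor and hand it to async_tx_submit() ...
 *	else
 *		... fall back to a synchronous memcpy ...
 */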

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached dependency, flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
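
/*
 * Resulting chain, sketched: when the new operation could not be appended to
 * depend_tx directly, an interrupt descriptor allocated on the parent channel
 * carries the dependency across channels:
 *
 *	depend_tx (chan A) ---> intr_tx (chan A) ---> tx (chan B)
 *
 * Once depend_tx completes, intr_tx runs on the parent channel and the
 * dmaengine dependency machinery then submits tx on its own channel.
 */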

/**
 * enum submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * while holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	tx->callback = submit->cb_fn;
	tx->callback_param = submit->cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
		       txd_parent(tx));

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				txd_chain(depend_tx, tx);
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		txd_unlock(depend_tx);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			txd_clear_parent(tx);
			tx->tx_submit(tx);
			break;
		}
	} else {
		txd_clear_parent(tx);
		tx->tx_submit(tx);
	}

	if (submit->flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx)
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
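
/*
 * Illustrative usage (a sketch under the usual async_* frontend pattern):
 * a caller that has prepared a descriptor on 'chan' hands it to
 * async_tx_submit() to have the dependency chain, callback and ack flags
 * applied, e.g.:
 *
 *	tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
 *	if (tx)
 *		async_tx_submit(chan, tx, submit);
 *	else
 *		... synchronous fallback ...
 */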

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, submit);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		async_tx_sync_epilog(submit);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
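
/*
 * Illustrative usage (a sketch; my_done_fn and my_ctx are caller-provided
 * placeholders): to be notified once a chain of offloaded operations has
 * drained, a caller can do something like:
 *
 *	struct async_submit_ctl submit;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, tx, my_done_fn, my_ctx, NULL);
 *	async_trigger_callback(&submit);
 *
 * where 'tx' is the last descriptor in the chain.
 */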

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for transaction\n",
			      __func__);
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
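
/*
 * Illustrative usage (sketch): the synchronous fallback paths in the async_*
 * frontends wait out any outstanding dependency before touching the buffers
 * with the CPU, e.g.:
 *
 *	async_tx_quiesce(&submit->depend_tx);
 *	... perform the operation synchronously ...
 */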

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");