/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
	async_dmaengine_get();

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 *
 * Returns the dependency's channel when it also supports @tx_type, so the
 * chain can stay on one channel; otherwise asks the dmaengine core for a
 * suitable channel.  A NULL return means the operation must be carried
 * out synchronously.
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
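
/*
 * Illustrative sketch, not compiled here: callers reach this helper through
 * the async_tx_find_channel() wrapper from <linux/async_tx.h> and fall back
 * to a CPU implementation on a NULL return.  Modeled loosely on
 * async_memcpy(); dma_dest, dma_src, and dma_prep_flags are placeholder
 * names for the caller's mapped addresses and flags, and error handling
 * is elided:
 *
 *	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
 *						      &dest, 1, &src, 1, len);
 *	struct dma_device *device = chan ? chan->device : NULL;
 *	struct dma_async_tx_descriptor *tx = NULL;
 *
 *	if (device)
 *		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *						    len, dma_prep_flags);
 *	if (tx)
 *		async_tx_submit(chan, tx, submit);
 *	else
 *		... perform the operation synchronously ...
 */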
#endif

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx;
	 * intr_tx starts as a poison value and is cleared on success
	 */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached the dependency directly, just flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt descriptor,
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append to depend_tx */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}

/**
 * enum submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * While holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};

/**
 * async_tx_submit - attach a callback, resolve dependencies, and submit @tx
 * @chan: channel selected to carry out the operation
 * @tx: descriptor returned by the driver's prep routine
 * @submit: submission and completion parameters, including any dependency
 */
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	tx->callback = submit->cb_fn;
	tx->callback_param = submit->cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1, i.e. two transactions cannot
		 * depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
		       txd_parent(tx));

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			/* we have a parent so we cannot submit directly:
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				txd_chain(depend_tx, tx);
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		txd_unlock(depend_tx);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			txd_clear_parent(tx);
			tx->tx_submit(tx);
			break;
		}
	} else {
		txd_clear_parent(tx);
		tx->tx_submit(tx);
	}

	if (submit->flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx)
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
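
/*
 * Illustrative sketch, not compiled here: callers express dependencies by
 * passing the previous descriptor to init_async_submit(), and the routing
 * above (append, channel switch, or direct submit) happens transparently
 * when the next operation is submitted.  mid, src, dest are placeholder
 * struct page pointers; my_cb and my_arg are a placeholder callback and
 * argument:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
 *	tx = async_memcpy(mid, src, 0, 0, len, &submit);
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, tx, my_cb, my_arg, NULL);
 *	tx = async_memcpy(dest, mid, 0, 0, len, &submit);
 */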

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt descriptor,
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else {
		tx = NULL;
	}

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, submit);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		async_tx_sync_epilog(submit);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
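
/*
 * Illustrative sketch, not compiled here: a typical caller (e.g. the raid5
 * stripe code) uses this to run a completion routine once a chain of
 * operations finishes, without issuing another data operation.
 * my_complete and my_ctx are placeholder names:
 *
 *	struct async_submit_ctl submit;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, tx, my_complete, my_ctx,
 *			  NULL);
 *	tx = async_trigger_callback(&submit);
 */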

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for transaction\n",
			      __func__);
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
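
/*
 * Illustrative sketch, not compiled here: the synchronous fallback paths of
 * the async_* operations quiesce their dependency before touching the data
 * on the CPU, then run the completion epilog.  dest_buf and src_buf are
 * placeholder names for mapped buffers:
 *
 *	async_tx_quiesce(&submit->depend_tx);
 *	memcpy(dest_buf, src_buf, len);
 *	async_tx_sync_epilog(submit);
 */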

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");