// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include "cesa.h"
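
/*
 * Advance @sgiter by @len bytes, stepping to the next scatterlist
 * entry once the current one is exhausted. Returns false when the
 * scatterlist or the operation length (@iter->op_len) has been fully
 * consumed, true if another transfer is still needed.
 */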
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}
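
/*
 * Kick the TDMA engine for @dreq: set the interrupt mask and the TDMA
 * burst parameters, point CESA_TDMA_NEXT_ADDR at the first descriptor
 * of the chain, then enable the accelerator. The WARN_ON() catches the
 * case where the engine is already running.
 */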
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->engine;

	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
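
/*
 * Free every TDMA descriptor of @dreq's chain, returning operation
 * contexts to the op pool and the descriptors themselves to the
 * descriptor pool, then reset the chain pointers.
 */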
void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;
		u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}
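
/*
 * Fixup pass run once the request has been assigned to @engine:
 * convert SRAM-relative source/destination offsets into absolute DMA
 * addresses using the engine's SRAM base, and adjust operation
 * descriptors for this engine via mv_cesa_adjust_op().
 */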
void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
			 struct mv_cesa_engine *engine)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
			tdma->dst = cpu_to_le32(tdma->dst_dma + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
			tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);

		if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
			mv_cesa_adjust_op(engine, tdma->op);
	}
}
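
/*
 * Append @dreq's descriptor chain to the engine-wide chain so the
 * hardware can flow from one request into the next on its own, except
 * when a chain break is required (see the comment in the else branch).
 */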
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
			struct mv_cesa_req *dreq)
{
	if (engine->chain.first == NULL && engine->chain.last == NULL) {
		engine->chain.first = dreq->chain.first;
		engine->chain.last = dreq->chain.last;
	} else {
		struct mv_cesa_tdma_desc *last;

		last = engine->chain.last;
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

		/*
		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN flag is
		 * set on the last element of the current chain, or if the
		 * request being queued needs the IV regs to be set before
		 * launching the request.
		 */
		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
			last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
	}
}
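
/*
 * Walk the engine chain up to the descriptor the hardware is currently
 * processing (CESA_TDMA_CUR), completing every request whose
 * CESA_TDMA_END_OF_REQ descriptor has already been passed. Returns 0,
 * or the first error reported by a request's ->process() hook.
 */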
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req = NULL;
	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
	dma_addr_t tdma_cur;
	int res = 0;

	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

	for (tdma = engine->chain.first; tdma; tdma = next) {
		spin_lock_bh(&engine->lock);
		next = tdma->next;
		spin_unlock_bh(&engine->lock);

		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
			struct crypto_async_request *backlog = NULL;
			struct mv_cesa_ctx *ctx;
			u32 current_status;

			spin_lock_bh(&engine->lock);
			/*
			 * If req is NULL, this means we're processing the
			 * request in engine->req.
			 */
			if (!req)
				req = engine->req;
			else
				req = mv_cesa_dequeue_req_locked(engine,
								 &backlog);

			/* Re-chaining to the next request */
			engine->chain.first = tdma->next;
			tdma->next = NULL;

			/* If this is the last request, clear the chain */
			if (engine->chain.first == NULL)
				engine->chain.last = NULL;
			spin_unlock_bh(&engine->lock);

			ctx = crypto_tfm_ctx(req->tfm);
			current_status = (tdma->cur_dma == tdma_cur) ?
					  status : CESA_SA_INT_ACC0_IDMA_DONE;
			res = ctx->ops->process(req, current_status);
			ctx->ops->complete(req);

			if (res == 0)
				mv_cesa_engine_enqueue_complete_request(engine,
									req);

			if (backlog)
				crypto_request_complete(backlog, -EINPROGRESS);
		}

		if (res || tdma->cur_dma == tdma_cur)
			break;
	}

	/*
	 * Save the last request in error to engine->req, so that the core
	 * knows which request was faulty.
	 */
	if (res) {
		spin_lock_bh(&engine->lock);
		engine->req = req;
		spin_unlock_bh(&engine->lock);
	}

	return res;
}
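
/*
 * Allocate a zeroed TDMA descriptor from the descriptor pool and
 * append it to @chain, linking it to its predecessor both in the CPU
 * list (->next) and in the hardware chain (->next_dma).
 */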
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
				   &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}
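
/*
 * Add a descriptor that copies @size bytes of result data from @src
 * into the context of the request's operation descriptor. The first
 * CESA_TDMA_OP descriptor found in the chain provides the destination
 * address and the op pointer.
 */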
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			      u32 size, u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma, *op_desc;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	/*
	 * We re-use an existing op_desc object to retrieve the context
	 * and result instead of allocating a new one.
	 * There is at least one object of this type in a CESA crypto
	 * req, just pick the first one in the chain.
	 */
	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			break;
	}

	if (!op_desc)
		return -EIO;

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src_dma = src;
	tdma->dst_dma = op_desc->src_dma;
	tdma->op = op_desc->op;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_RESULT;
	return 0;
}
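
/*
 * Append an operation descriptor: clone @op_templ into a new op
 * context from the op pool and emit a transfer copying it into the
 * engine SRAM. When @skip_ctx is set, only the header (op->desc) is
 * copied, not the whole context.
 */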
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
					  const struct mv_cesa_op_ctx *op_templ,
					  bool skip_ctx,
					  gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;
	struct mv_cesa_op_ctx *op;
	dma_addr_t dma_handle;
	unsigned int size;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return ERR_CAST(tdma);

	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
	if (!op)
		return ERR_PTR(-ENOMEM);

	*op = *op_templ;

	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

	tdma = chain->last;
	tdma->op = op;
	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = cpu_to_le32(dma_handle);
	tdma->dst_dma = CESA_SA_CFG_SRAM_OFFSET;
	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

	return op;
}
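
/*
 * Append a plain data transfer descriptor moving @size bytes from @src
 * to @dst; @flags indicates which of the two endpoints (if any) lives
 * in the engine SRAM.
 */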
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
				  dma_addr_t dst, dma_addr_t src, u32 size,
				  u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src_dma = src;
	tdma->dst_dma = dst;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_DATA;

	return 0;
}
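
/*
 * Append an empty, zero-initialized descriptor. As the name suggests,
 * callers use it as a launch placeholder at the head of a chain.
 */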
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	return PTR_ERR_OR_ZERO(tdma);
}
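
/*
 * Append a terminating descriptor with a null byte count (only BIT(31)
 * set) to mark the end of the chain.
 */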
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(BIT(31));

	return 0;
}
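
/*
 * Emit one data transfer descriptor per scatterlist chunk, copying
 * between the DMA-mapped buffer and the engine data SRAM in the
 * direction given by sgiter->dir, until the operation length is
 * covered.
 */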
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
				 struct mv_cesa_dma_iter *dma_iter,
				 struct mv_cesa_sg_dma_iter *sgiter,
				 gfp_t gfp_flags)
{
	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
	unsigned int len;

	do {
		dma_addr_t dst, src;
		int ret;

		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
		if (sgiter->dir == DMA_TO_DEVICE) {
			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
			src = sg_dma_address(sgiter->sg) + sgiter->offset;
		} else {
			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
		}

		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
						    flags, gfp_flags);
		if (ret)
			return ret;

	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

	return 0;
}
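
/*
 * CPU copy between a scatterlist and the engine SRAM, honouring both
 * SRAM flavours (genalloc pool vs. iomem). Intended for the standard
 * (non-TDMA) processing path. Returns the number of bytes copied.
 */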
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
		       struct scatterlist *sgl, unsigned int nents,
		       unsigned int sram_off, size_t buflen, off_t skip,
		       bool to_sram)
{
	unsigned int sg_flags = SG_MITER_ATOMIC;
	struct sg_mapping_iter miter;
	unsigned int offset = 0;

	if (to_sram)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_sram) {
			if (engine->pool)
				memcpy(engine->sram_pool + sram_off + offset,
				       miter.addr, len);
			else
				memcpy_toio(engine->sram + sram_off + offset,
					    miter.addr, len);
		} else {
			if (engine->pool)
				memcpy(miter.addr,
				       engine->sram_pool + sram_off + offset,
				       len);
			else
				memcpy_fromio(miter.addr,
					      engine->sram + sram_off + offset,
					      len);
		}

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}