// SPDX-License-Identifier: GPL-2.0
//
// Renesas R-Car Audio DMAC support
//
// Copyright (C) 2015 Renesas Electronics Corp.
// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE		(1 << 0)
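
/*
 * Note: rsnd_dmapp_get_chcr() below composes PDMACHCR with the source
 * resource ID in bits [31:24] and the destination resource ID in bits
 * [23:16]; rsnd_dmapp_attach() then ORs in PDMACHCR_DE so the channel is
 * enabled when rsnd_dmapp_start() writes PDMACHCR.
 */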

struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem	*ppbase;
	phys_addr_t	ppres;
	int		dmaen_num;
	int		dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 * Audio DMAC
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_async(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses a mutex lock.
	 * Thus, it shouldn't be called under a spinlock.
	 * Let's call it from cleanup.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	/* maybe suspended */
	if (dmaen->chan)
		return 0;

	/*
	 * DMAEngine request uses a mutex lock.
	 * Thus, it shouldn't be called under a spinlock.
	 * Let's call it from prepare.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
	int is_play = rsnd_io_is_play(io);
	int ret;

	/*
	 * In the case of monaural data written or read through the
	 * Audio-DMAC, the data is always in Left-Justified format, so both
	 * src and dst DMA bus widths need to be set equal to the physical
	 * data width.
	 */
	if (rsnd_runtime_channel_original(io) == 1) {
		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
		int bits = snd_pcm_format_physical_width(runtime->format);

		switch (bits) {
		case 8:
			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
			break;
		case 16:
			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
			break;
		case 32:
			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
			break;
		default:
			dev_err(dev, "invalid format width %d\n", bits);
			return -EINVAL;
		}
	}

	cfg.direction		= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr		= dma->src_addr;
	cfg.dst_addr		= dma->dst_addr;
	cfg.src_addr_width	= buswidth;
	cfg.dst_addr_width	= buswidth;

	dev_dbg(dev, "%s %pad -> %pad\n",
		rsnd_mod_name(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

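	/*
	 * Map the whole ALSA buffer as a single cyclic transfer, split into
	 * period-sized chunks; with DMA_PREP_INTERRUPT the completion
	 * callback fires once per period and reports period elapsed.
	 */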
	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node, char *name,
					  struct rsnd_mod *mod, char *x)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		i = rsnd_node_fixed_index(dev, np, name, i);
		if (i < 0) {
			chan = NULL;
			of_node_put(np);
			break;
		}

		if (i == rsnd_mod_id_raw(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, x);
		i++;
	}

	/* of_node came from rsnd_xxx_of_node(), so it must be put back */
	of_node_put(of_node);

	return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* follow the -EPROBE_DEFER case */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA failed. Fall back to PIO mode.
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

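	/*
	 * For a cyclic transfer the reported residue is the number of bytes
	 * left in the current pass over the buffer, so the hardware position
	 * is the buffer length minus the residue.
	 */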
	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.prepare	= rsnd_dmaen_prepare,
	.cleanup	= rsnd_dmaen_cleanup,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
	.get_status	= rsnd_mod_get_status,
};

/*
 * Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	/* SSI00 ~ SSI07 */
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	/* SSI10 ~ SSI17 */
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	/* SSI20 ~ SSI27 */
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	/* SSI30 ~ SSI37 */
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	/* SSI40 ~ SSI47 */
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	/* SSI5 */
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI6 */
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI7 */
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI8 */
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI90 ~ SSI97 */
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

#define rsnd_dmapp_addr(dmac, dma, reg)					\
	(dmac->ppbase + 0x20 + reg +					\
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void rsnd_dmapp_debug_info(struct seq_file *m,
				  struct rsnd_dai_stream *io,
				  struct rsnd_mod *mod)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_debugfs_reg_show(m, dmac->ppres, dmac->ppbase,
			      0x20 + 0x10 * dmapp->dmapp_id, 0x10);
}
#define DEBUG_INFO .debug_info = rsnd_dmapp_debug_info
#else
#define DEBUG_INFO
#endif

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name		= "audmac-pp",
	.start		= rsnd_dmapp_start,
	.stop		= rsnd_dmapp_stop,
	.quit		= rsnd_dmapp_stop,
	.get_status	= rsnd_mod_get_status,
	DEBUG_INFO
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	  mod        / DMAC in     / DMAC out    / DMAC PP in  / DMAC pp out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000                0xec308000
 */
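
/*
 * The RDMA_xxx() macros below derive each mod's DMA data port from its
 * register base (ssi_reg/src_reg): a fixed offset selects the in/out or
 * peri-peri port, and a per-channel stride (0x40 per SSI, 0x400 per SRC,
 * 0x1000 per SSIU plus BUSIF spacing) selects the channel instance.
 */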
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)

#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
		     !!(rsnd_io_to_mod_ssiu(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),		0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,		RDMA_SSI_I_N(ssi, id) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,		RDMA_SSIU_I_N(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,		RDMA_SSIU_I_P(ssi, id, busif) } } },
	};

	/*
	 * FIXME
	 *
	 * We can't support SSI9-4/5/6/7, because their addresses fall
	 * outside of this calculation rule.
	 */
	if ((id == 9) && (busif >= 4))
		dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
			id, busif);

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

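	/*
	 * The table above is indexed as
	 * dma_addrs[is_ssi: 0=SRC, 1=SSI, 2=SSIU][is_play][use_src + use_cmd],
	 * and each entry holds the DMAC-facing out/in addresses for that path.
	 */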
	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

/*
 * Gen4 DMA read/write register offset
 *
 * ex) R-Car V4H case
 *	  mod         / SYS-DMAC in / SYS-DMAC out
 *	SSI_SDMC: 0xec400000 / 0xec400000 / 0xec400000
 */
#define RDMA_SSI_SDMC(addr, i)	(addr + (0x8000 * i))
static dma_addr_t
rsnd_gen4_dma_addr(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	phys_addr_t addr = rsnd_gen_get_phy_addr(priv, RSND_GEN4_SDMC);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(mod);

	/*
	 * Only SSI0 is supported
	 */
	if (id != 0) {
		struct device *dev = rsnd_priv_to_dev(priv);

		dev_err(dev, "This driver doesn't support non SSI0");
		return -EINVAL;
	}

	return RDMA_SSI_SDMC(addr, busif);
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	if (!mod)
		return 0;

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;
	else if (rsnd_is_gen4(priv))
		return rsnd_gen4_dma_addr(io, mod, is_play, is_from);
	else
		return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi;
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	/*
	 * The DT should use "rcar_sound,ssiu", but we need to keep
	 * compatibility with old versions.
	 *
	 * If it has "rcar_sound,ssiu", it will be used.
	 * If not, "rcar_sound,ssi" will be used.
	 * see
	 *	rsnd_ssiu_dma_req()
	 *	rsnd_ssi_dma_req()
	 */
	if (rsnd_ssiu_of_node(priv)) {
		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);

		/* use SSIU */
		ssi = ssiu;
		if (this == rsnd_io_to_mod_ssi(io))
			this = ssiu;
	} else {
		/* keep compatibility, use SSI */
		ssi = rsnd_io_to_mod_ssi(io);
	}

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		| SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, "  %s%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}

static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA failed. Fall back to PIO mode.
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 or later */
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s %s -> %s\n",
		rsnd_mod_name(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	/* Gen4 doesn't have DMA-pp */
	if (rsnd_is_gen4(priv))
		goto audmapp_end;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	if (!res) {
		dev_err(dev, "lack of audmapp in DT\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->ppres = res->start;
	dmac->ppbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->ppbase))
		return PTR_ERR(dmac->ppbase);
audmapp_end:
	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}