xref: /openbmc/linux/sound/soc/sh/rcar/dma.c (revision 2ce1b21c)
// SPDX-License-Identifier: GPL-2.0
//
// Renesas R-Car Audio DMAC support
//
// Copyright (C) 2015 Renesas Electronics Corp.
// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE		(1 << 0)


struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

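/*
 * rsnd_dma is the common handle for both transfer types: "en" holds the
 * DMAEngine channel state of the normal Audio DMAC, "pp" holds the
 * register-driven Audio DMAC peri peri state. Only one of them is used per
 * instance, hence the union. mod_from/mod_to record the two endpoints the
 * transfer sits between (NULL stands for memory).
 */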
struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem *ppbase;
	phys_addr_t ppres;
	int dmaen_num;
	int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod) container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 *		Audio DMAC
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

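/*
 * For a DMAEngine transfer exactly one of mod_from/mod_to is a real module
 * (the other end is memory), so the channel is requested from whichever
 * module is present; any other combination is rejected with NULL.
 */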
static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_async(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine channel release uses a mutex lock.
	 * Thus, it must not be called under a spinlock;
	 * call it from non-atomic context such as this cleanup callback.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	/* maybe suspended */
	if (dmaen->chan)
		return 0;

	/*
	 * DMAEngine channel request uses a mutex lock.
	 * Thus, it must not be called under a spinlock;
	 * request the channel here in prepare instead.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

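/*
 * The whole ALSA buffer is mapped as one cyclic DMAEngine transaction with
 * an interrupt per period; each interrupt lands in rsnd_dmaen_complete(),
 * which reports the elapsed period to the PCM core.
 */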
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
	int is_play = rsnd_io_is_play(io);
	int ret;

	/*
	 * When monaural data is written or read through the Audio DMAC,
	 * the data is always in Left-Justified format, so both the src and
	 * dst DMA bus widths need to be set equal to the physical data width.
	 */
	if (rsnd_runtime_channel_original(io) == 1) {
		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
		int bits = snd_pcm_format_physical_width(runtime->format);

		switch (bits) {
		case 8:
			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
			break;
		case 16:
			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
			break;
		case 32:
			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
			break;
		default:
			dev_err(dev, "invalid format width %d\n", bits);
			return -EINVAL;
		}
	}

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = buswidth;
	cfg.dst_addr_width = buswidth;

	dev_dbg(dev, "%s %pad -> %pad\n",
		rsnd_mod_name(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

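/*
 * The channel is looked up from the DT sub node whose fixed index matches
 * this mod. An illustrative node layout (not taken from a real DTS) would be
 * something like:
 *
 *	rcar_sound,ssi {
 *		ssi0: ssi-0 {
 *			dmas = <&audma0 0x01>, <&audma1 0x02>;
 *			dma-names = "rx", "tx";
 *		};
 *		...
 *	};
 *
 * where the "x" argument below selects which dma-names entry to request.
 */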
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node, char *name,
					  struct rsnd_mod *mod, char *x)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		i = rsnd_node_fixed_index(np, name, i);

		if (i == rsnd_mod_id_raw(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, x);
		i++;
	}

	/* of_node came from rsnd_xxx_of_node(), so it needs of_node_put() */
	of_node_put(of_node);

	return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			   struct rsnd_dma *dma,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* pass the -EPROBE_DEFER case through */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA channel request failed; fall back to PIO mode.
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

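/*
 * The current position is derived from the DMAEngine residue: dma_len is the
 * full buffer length programmed in rsnd_dmaen_start(), so (dma_len - residue)
 * is how far the cyclic transfer has advanced inside the buffer.
 */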
static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.prepare	= rsnd_dmaen_prepare,
	.cleanup	= rsnd_dmaen_cleanup,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
	.get_status	= rsnd_mod_get_status,
};

/*
 *		Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	/* SSI00 ~ SSI07 */
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	/* SSI10 ~ SSI17 */
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	/* SSI20 ~ SSI27 */
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	/* SSI30 ~ SSI37 */
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	/* SSI40 ~ SSI47 */
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	/* SSI5 */
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI6 */
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI7 */
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI8 */
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI90 ~ SSI97 */
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

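/*
 * The tables above map a module instance to the fixed Audio DMAC peri peri
 * resource ID that gets programmed into PDMACHCR. SSIU entries are indexed
 * by (SSI id * 8 + BUSIF), SRC/CMD entries directly by the module id.
 */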
static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

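/*
 * PDMACHCR layout as used here: source resource ID in bits [31:24],
 * destination resource ID in bits [23:16], DE (enable) in bit 0.
 * For example, SRC0 (0x2d) feeding SSI00 (0x00) gives 0x2d000001 once
 * rsnd_dmapp_attach() ORs in PDMACHCR_DE.
 */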
static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->ppbase + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
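/*
 * Each peri peri channel owns a 0x10 byte register window starting at
 * ppbase + 0x20, so channel N accesses PDMASAR/PDMADAR/PDMACHCR at
 * 0x20 + (N * 0x10) + {0x00, 0x04, 0x0c}.
 */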
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0,  PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

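/*
 * Kick a peri peri transfer: program the source and destination resource
 * addresses first, then write CHCR (which already has DE set) to start it.
 */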
static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void rsnd_dmapp_debug_info(struct seq_file *m,
				  struct rsnd_dai_stream *io,
				  struct rsnd_mod *mod)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_debugfs_reg_show(m, dmac->ppres, dmac->ppbase,
			      0x20 + 0x10 * dmapp->dmapp_id, 0x10);
}
#define DEBUG_INFO .debug_info = rsnd_dmapp_debug_info
#else
#define DEBUG_INFO
#endif

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name		= "audmac-pp",
	.start		= rsnd_dmapp_start,
	.stop		= rsnd_dmapp_stop,
	.quit		= rsnd_dmapp_stop,
	.get_status	= rsnd_mod_get_status,
	DEBUG_INFO
};

/*
 *		Common DMAC Interface
 */

/*
 *	DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 *	ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC PP out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000 /            / 0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)

#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))
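/*
 * Worked example against the R-Car H2 table above (illustrative only):
 *	RDMA_SSI_I_N(ssi, 0)     = 0xec541000 - 0x00300000 + 0x8 = 0xec241008
 *	RDMA_SSIU_I_P(ssi, 0, 0) = 0xec541000 - 0x00141000       = 0xec400000
 *	RDMA_SRC_O_P(src, 0)     = 0xec500000 - 0x001fc000       = 0xec304000
 * matching the table's columns for SSI0, SSIU0 (BUSIF0) and SRC0.
 */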

static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
		     !!(rsnd_io_to_mod_ssiu(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
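	/*
	 * dma_addrs[][][] is indexed as [is_ssi][is_play][use_src + use_cmd]:
	 * 0/1/2 for SRC/SSI/SSIU(BUSIF) on the first axis, capture/playback
	 * on the second, and path complexity (SSI only / +SRC / +SRC+CMD)
	 * on the third.
	 */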
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),		0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,			RDMA_SSI_I_N(ssi, id) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
		 /* Playback */
		 {{ 0,			RDMA_SSIU_I_N(ssi, id, busif) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) },
		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) } } },
	};

	/*
	 * FIXME
	 *
	 * We can't support SSI9-4/5/6/7, because their addresses are
	 * outside of the calculation rule.
	 */
	if ((id == 9) && (busif >= 4))
		dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
			id, busif);

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
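/*
 * Work out which two modules the requested DMA sits between by rebuilding
 * the stream path mem <-> SRC <-> (CTU/MIX/DVC) <-> SSI; a NULL endpoint
 * stands for memory (printed as the dummy "mem" mod in the debug output).
 */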
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi;
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	/*
	 * DT should use "rcar_sound,ssiu",
	 * but we need to keep compatibility with old versions.
	 *
	 * If "rcar_sound,ssiu" exists, it will be used.
	 * If not, "rcar_sound,ssi" will be used.
	 * see
	 *	rsnd_ssiu_dma_req()
	 *	rsnd_ssi_dma_req()
	 */
	if (rsnd_ssiu_of_node(priv)) {
		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);

		/* use SSIU */
		ssi = ssiu;
		if (this == rsnd_io_to_mod_ssi(io))
			this = ssiu;
	} else {
		/* keep compatible, use SSI */
		ssi = rsnd_io_to_mod_ssi(io);
	}

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*->		Audio DMAC
	 * -o->		Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		| SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, "  %s%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}

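/*
 * Pick the transfer type: if both endpoints are on-chip modules, the
 * Audio DMAC peri peri is used; otherwise (one end is memory) the
 * DMAEngine-based Audio DMAC is used. Gen1 always takes the DMAEngine path.
 */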
static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * No DMAC is available; fall back to PIO mode.
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 or later */
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s %s -> %s\n",
		rsnd_mod_name(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

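/*
 * Map the "audmapp" register resource on Gen2 or later; if it is missing,
 * the driver carries on and the stream will run in PIO mode instead.
 * The dummy "mem" mod is only registered for the debug prints above.
 */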
int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->ppres  = res->start;
	dmac->ppbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->ppbase))
		return PTR_ERR(dmac->ppbase);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}
899