/*
 * Renesas R-Car Audio DMAC support
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)

struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem		*base;
	int			dmaen_num;
	int			dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)
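/*
 * One rsnd_dma describes a single DMA hop of a stream path. Depending on
 * how the hop is connected (see rsnd_dma_alloc() below), the union holds
 * either a DMAEngine channel (rsnd_dmaen, "audmac") or an Audio DMAC
 * peri peri channel (rsnd_dmapp, "audmac-pp"); only one of the two is
 * ever used for a given rsnd_dma.
 */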
/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 * Audio DMAC
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_all(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io,
				  struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under nolock_stop
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
				   struct rsnd_dai_stream *io,
				   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	/*
	 * DMAEngine request uses mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under nolock_start
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	int is_play = rsnd_io_is_play(io);
	int ret;

	cfg.direction		= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr		= dma->src_addr;
	cfg.dst_addr		= dma->dst_addr;
	cfg.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "%s[%d] %pad -> %pad\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}
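/*
 * The whole ALSA buffer is submitted above as one cyclic transfer, split
 * into period-sized chunks, so rsnd_dmaen_complete() fires once per period
 * and ends up in rsnd_dai_period_elapsed(). As a rough illustration: a
 * 64KiB buffer with 16KiB periods gives one completion callback per 16KiB
 * of audio data, four per trip around the buffer.
 */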
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/* It should call of_node_put(), since it is rsnd_xxx_of_node() */
	of_node_put(of_node);

	return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/*
		 * DMA failed. try to PIO mode
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.nolock_start	= rsnd_dmaen_nolock_start,
	.nolock_stop	= rsnd_dmaen_nolock_stop,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
};
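/*
 * Note the split in rsnd_dmaen_ops: channel request/release go through
 * DMAEngine and may sleep on a mutex, so they live in the nolock_start/
 * nolock_stop callbacks, while start/stop, which may run under spinlock,
 * only submit or terminate work on the already-requested channel.
 */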
/*
 * Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	0x00, /* SSI00 */
	0x04, /* SSI10 */
	0x08, /* SSI20 */
	0x0c, /* SSI3  */
	0x0d, /* SSI4  */
	0x0e, /* SSI5  */
	0x0f, /* SSI6  */
	0x10, /* SSI7  */
	0x11, /* SSI8  */
	0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = rsnd_mod_id(mod);
	int size = 0;

	if (mod == ssi) {
		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s[%d])\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}
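/*
 * For illustration: a playback hop from SRC0 into SSI00 is encoded here
 * as (0x2d << 24) | (0x00 << 16) = 0x2d000000, and rsnd_dmapp_attach()
 * below ORs in PDMACHCR_DE, so PDMACHCR ends up as 0x2d000001.
 */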
#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->base + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
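/*
 * Each peri peri channel owns a 0x10 byte register window starting at
 * base + 0x20 + (0x10 * dmapp_id), holding PDMASAR/PDMADAR/PDMACHCR.
 * For dmapp_id == 2, for example, PDMACHCR is accessed at
 * base + 0x20 + 0x20 + 0x0c = base + 0x4c.
 */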
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name	= "audmac-pp",
	.start	= rsnd_dmapp_start,
	.stop	= rsnd_dmapp_stop,
	.quit	= rsnd_dmapp_stop,
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	 mod  / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *	 SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	 SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	 SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	 CMD : 0xec500000 /            / 0xec008000               0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } } },
	};

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}
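/*
 * dma_addrs[][][] above is indexed as
 * [0:SRC 1:SSI 2:SSIU][capture:0 playback:1][0:SSI only, 1:+SRC, 2:+SRC+CMD],
 * where is_ssi is bumped from 1 to 2 when the stream goes through BUSIF
 * (rsnd_ssi_use_busif()). is_from then selects out_addr (this module is the
 * DMA source) or in_addr (this module is the DMA destination).
 */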
static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		| SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s[%d])\n",
		rsnd_mod_name(this), rsnd_mod_id(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, "  %s[%d]%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			rsnd_mod_id  (mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}

static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA failed. try to PIO mode
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 or later */
	if (mod_from && mod_to) {
		ops = &rsnd_dmapp_ops;
		attach = rsnd_dmapp_attach;
		dma_id = dmac->dmapp_num;
		type = RSND_MOD_AUDMAPP;
	} else {
		ops = &rsnd_dmaen_ops;
		attach = rsnd_dmaen_attach;
		dma_id = dmac->dmaen_num;
		type = RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops = &rsnd_dmaen_ops;
		attach = rsnd_dmaen_attach;
		dma_id = dmac->dmaen_num;
		type = RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    rsnd_mod_get_status, type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
		rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_id  (mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem),
		rsnd_mod_id  (mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
}