/*
 * Renesas R-Car Audio DMAC support
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)

struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	dma_addr_t		dma_buf;
	unsigned int		dma_len;
	unsigned int		dma_period;
	unsigned int		dma_cnt;
};

struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem		*base;
	int			dmaen_num;
	int			dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 * Audio DMAC
 */
#define rsnd_dmaen_sync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 1)
#define rsnd_dmaen_unsync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 0)
static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen,
			      struct rsnd_dai_stream *io,
			      int i, int sync)
{
	struct device *dev = dmaen->chan->device->dev;
	enum dma_data_direction dir;
	int is_play = rsnd_io_is_play(io);
	dma_addr_t buf;
	int len, max;
	size_t period;

	len	= dmaen->dma_len;
	period	= dmaen->dma_period;
	max	= len / period;
	i	= i % max;
	buf	= dmaen->dma_buf + (period * i);

	dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (sync)
		dma_sync_single_for_device(dev, buf, period, dir);
	else
		dma_sync_single_for_cpu(dev, buf, period, dir);
}
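
/*
 * Note: the PCM buffer is mapped once with dma_map_single() in
 * rsnd_dmaen_start(), so every period has to be synced by hand:
 * rsnd_dmaen_sync() hands a period over to the device,
 * rsnd_dmaen_unsync() hands it back to the CPU.
 * At start time periods 0 and 1 are synced for the device; on every
 * completion the just-finished period is unsynced and the period after
 * next is synced, so the hardware always owns the current and the next
 * period.
 */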
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	bool elapsed = false;
	unsigned long flags;

	/*
	 * Renesas sound Gen1 needs 1 DMAC,
	 * Gen2 needs 2 DMACs.
	 * In the Gen2 case these are the Audio-DMAC and the
	 * Audio-DMAC-peri-peri. The Audio-DMAC-peri-peri doesn't have an
	 * interrupt, and this driver relies on that here.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	if (rsnd_io_is_working(io)) {
		rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);

		/*
		 * The next period has already started.
		 * Let's sync the period after next.
		 * see
		 *	rsnd_dmaen_start()
		 */
		rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);

		elapsed = true;

		dmaen->dma_cnt++;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (elapsed)
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan) {
		int is_play = rsnd_io_is_play(io);

		dmaengine_terminate_all(dmaen->chan);
		dma_unmap_single(dmaen->chan->device->dev,
				 dmaen->dma_buf, dmaen->dma_len,
				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io,
				  struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine channel release uses a mutex internally.
	 * Thus, it must not be called under a spinlock.
	 * Let's call it from nolock_stop.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
				   struct rsnd_dai_stream *io,
				   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	/*
	 * DMAEngine channel request uses a mutex internally.
	 * Thus, it must not be called under a spinlock.
	 * Let's call it from nolock_start.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}
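
/*
 * rsnd_dmaen_start() below maps the whole ALSA buffer with dma_map_single()
 * and prepares one cyclic descriptor over it, so the DMAEngine driver calls
 * rsnd_dmaen_complete() once per period. The device-side FIFO address comes
 * from dma->src_addr / dma->dst_addr (see rsnd_dma_addr()); the memory side
 * is taken from the cyclic buffer itself.
 */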
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	dma_addr_t buf;
	size_t len;
	size_t period;
	int is_play = rsnd_io_is_play(io);
	int i;
	int ret;

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "%s[%d] %pad -> %pad\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	len	= snd_pcm_lib_buffer_bytes(substream);
	period	= snd_pcm_lib_period_bytes(substream);
	buf	= dma_map_single(dmaen->chan->device->dev,
				 substream->runtime->dma_area,
				 len,
				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
		dev_err(dev, "dma map failed\n");
		return -EIO;
	}

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 buf, len, period,
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_buf		= buf;
	dmaen->dma_len		= len;
	dmaen->dma_period	= period;
	dmaen->dma_cnt		= 0;

	/*
	 * synchronize this and next period
	 * see
	 *	__rsnd_dmaen_complete()
	 */
	for (i = 0; i < 2; i++)
		rsnd_dmaen_sync(dmaen, io, i);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/* of_node came from rsnd_xxx_of_node(), thus it needs of_node_put() */
	of_node_put(of_node);

	return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/*
		 * DMA failed. Fall back to PIO mode.
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.nolock_start	= rsnd_dmaen_nolock_start,
	.nolock_stop	= rsnd_dmaen_nolock_stop,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
};

/*
 * Audio DMAC peri peri
 */
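
/*
 * The tables below hold the Audio DMAC peri peri IDs of the SSIU, SRC and
 * CMD blocks. rsnd_dmapp_get_chcr() packs them into PDMACHCR:
 * source ID in bits [31:24], destination ID in bits [23:16].
 */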
static const u8 gen2_id_table_ssiu[] = {
	0x00, /* SSI00 */
	0x04, /* SSI10 */
	0x08, /* SSI20 */
	0x0c, /* SSI3  */
	0x0d, /* SSI4  */
	0x0e, /* SSI5  */
	0x0f, /* SSI6  */
	0x10, /* SSI7  */
	0x11, /* SSI8  */
	0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = rsnd_mod_id(mod);
	int size = 0;

	if (mod == ssi) {
		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s[%d])\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}
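
/*
 * Each Audio DMAC peri peri channel has a 0x10 byte register window starting
 * at base + 0x20: PDMASAR at +0x00, PDMADAR at +0x04, PDMACHCR at +0x0c.
 */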
#define rsnd_dmapp_addr(dmac, dma, reg)					\
	(dmac->base + 0x20 + reg +					\
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,   PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name	= "audmac-pp",
	.start	= rsnd_dmapp_start,
	.stop	= rsnd_dmapp_stop,
	.quit	= rsnd_dmapp_stop,
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RDMA_xxx_I_N	for Audio DMAC input
 *	RDMA_xxx_O_N	for Audio DMAC output
 *	RDMA_xxx_I_P	for Audio DMAC peri peri input
 *	RDMA_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000               0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))
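
/*
 * Example, using the R-Car H2 numbers from the table above
 * (ssi_reg = 0xec541000): the Audio DMAC input address of SSI0 is
 * RDMA_SSI_I_N(ssi, 0) = 0xec541000 - 0x00300000 + 0x8 = 0xec241008.
 */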
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } } },
	};

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}
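
/*
 * rsnd_dma_of_path() below works out which two modules a DMA instance sits
 * between. For playback through mem -> SRC -> DVC -> SSI, for example, the
 * DMA attached on the SRC side handles the mem -> SRC copy (Audio DMAC),
 * while the DMA attached on the SSI side handles the DVC -> SSI copy
 * (Audio DMAC peri peri).
 */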
#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		 | SSI | SRC |
	 * ------------+-----+-----+
	 *  is_play	 |  o  |  *  |
	 * !is_play	 |  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s[%d])\n",
		rsnd_mod_name(this), rsnd_mod_id(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, " %s[%d]%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			rsnd_mod_id  (mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}
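
/*
 * If both ends of the path are real modules, the transfer is handled by the
 * register-programmed Audio DMAC peri peri; if one end is memory, a DMAEngine
 * channel (Audio DMAC) is used instead. Gen1 always uses the DMAEngine path.
 */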
static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA failed. Fall back to PIO mode.
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 */
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    rsnd_mod_get_status, type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
		rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_id  (mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem),
		rsnd_mod_id  (mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
}