/*
 * Renesas R-Car Audio DMAC support
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)

struct rsnd_dmaen {
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_addr_t dma_buf;
	unsigned int dma_len;
	unsigned int dma_period;
	unsigned int dma_cnt;
};

struct rsnd_dmapp {
	int dmapp_id;
	u32 chcr;
};

struct rsnd_dma {
	struct rsnd_mod mod;
	struct rsnd_mod *mod_from;
	struct rsnd_mod *mod_to;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem *base;
	int dmaen_num;
	int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/*
 * Audio DMAC
 */
#define rsnd_dmaen_sync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 1)
#define rsnd_dmaen_unsync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 0)
static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
			      int i, int sync)
{
	struct device *dev = dmaen->chan->device->dev;
	enum dma_data_direction dir;
	int is_play = rsnd_io_is_play(io);
	dma_addr_t buf;
	int len, max;
	size_t period;

	len	= dmaen->dma_len;
	period	= dmaen->dma_period;
	max	= len / period;
	i	= i % max;
	buf	= dmaen->dma_buf + (period * i);

	dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (sync)
		dma_sync_single_for_device(dev, buf, period, dir);
	else
		dma_sync_single_for_cpu(dev, buf, period, dir);
}

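/*
 * Completion handler for the cyclic Audio DMAC transfer: runs once per
 * period, re-syncs the ring buffer and reports the elapsed period to
 * ALSA via rsnd_dai_period_elapsed().
 */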
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	bool elapsed = false;
	unsigned long flags;

	/*
	 * Renesas sound Gen1 needs 1 DMAC,
	 * Gen2 needs 2 DMACs.
	 * In the Gen2 case these are the Audio-DMAC and the
	 * Audio-DMAC-peri-peri.
	 * But the Audio-DMAC-peri-peri has no interrupt,
	 * and this driver relies on that here.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	if (rsnd_io_is_working(io)) {
		rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);

		/*
		 * The next period has already started.
		 * Let's sync the period after the next one.
		 * see
		 *	rsnd_dmaen_start()
		 */
		rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);

		elapsed = true;

		dmaen->dma_cnt++;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (elapsed)
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan) {
		int is_play = rsnd_io_is_play(io);

		dmaengine_terminate_all(dmaen->chan);
		dma_unmap_single(dmaen->chan->device->dev,
				 dmaen->dma_buf, dmaen->dma_len,
				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io,
				  struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses a mutex lock.
	 * Thus, it shouldn't be called under a spinlock.
	 * Let's call it under nolock_stop.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
				   struct rsnd_dai_stream *io,
				   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	/*
	 * DMAEngine request uses a mutex lock.
	 * Thus, it shouldn't be called under a spinlock.
	 * Let's call it under nolock_start.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		/* PTR_ERR(NULL) would be 0 (success), so map NULL to -EIO */
		int ret = dmaen->chan ? PTR_ERR(dmaen->chan) : -EIO;

		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return ret;
	}

	return 0;
}

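/*
 * Map the whole PCM buffer for DMA, set up a cyclic DMAEngine transfer
 * (one interrupt per period) and start it.  The first two periods are
 * synced for the device here; __rsnd_dmaen_complete() keeps syncing one
 * period ahead from then on.
 */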
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	dma_addr_t buf;
	size_t len;
	size_t period;
	int is_play = rsnd_io_is_play(io);
	int i;
	int ret;

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "%s[%d] %pad -> %pad\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	len	= snd_pcm_lib_buffer_bytes(substream);
	period	= snd_pcm_lib_period_bytes(substream);
	buf	= dma_map_single(dmaen->chan->device->dev,
				 substream->runtime->dma_area,
				 len,
				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
		dev_err(dev, "dma map failed\n");
		return -EIO;
	}

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 buf, len, period,
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_buf		= buf;
	dmaen->dma_len		= len;
	dmaen->dma_period	= period;
	dmaen->dma_cnt		= 0;

	/*
	 * synchronize this and the next period
	 * see
	 *	__rsnd_dmaen_complete()
	 */
	for (i = 0; i < 2; i++)
		rsnd_dmaen_sync(dmaen, io, i);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/* It should call of_node_put(), since it is rsnd_xxx_of_node() */
	of_node_put(of_node);

	return chan;
}

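/*
 * Probe-time check only: make sure a DMAEngine channel can be obtained
 * for this path and release it again right away.  The channel actually
 * used for streaming is requested in rsnd_dmaen_nolock_start().
 */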
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/*
		 * DMA failed. try to PIO mode
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.nolock_start	= rsnd_dmaen_nolock_start,
	.nolock_stop	= rsnd_dmaen_nolock_stop,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
};

/*
 * Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	0x00, /* SSI00 */
	0x04, /* SSI10 */
	0x08, /* SSI20 */
	0x0c, /* SSI3  */
	0x0d, /* SSI4  */
	0x0e, /* SSI5  */
	0x0f, /* SSI6  */
	0x10, /* SSI7  */
	0x11, /* SSI8  */
	0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = rsnd_mod_id(mod);
	int size = 0;

	if (mod == ssi) {
		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s[%d])\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

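/*
 * Each Audio DMAC peri peri channel has a 0x10 byte register window
 * starting at base + 0x20 (PDMASAR / PDMADAR / PDMACHCR).  PDMACHCR
 * carries the source/destination resource IDs from
 * rsnd_dmapp_get_chcr() plus the DE (enable) bit.
 */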
#define rsnd_dmapp_addr(dmac, dma, reg)					\
	(dmac->base + 0x20 + reg +					\
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name	= "audmac-pp",
	.start	= rsnd_dmapp_start,
	.stop	= rsnd_dmapp_stop,
	.quit	= rsnd_dmapp_stop,
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RDMA_xxx_I_N	for Audio DMAC input
 *	RDMA_xxx_O_N	for Audio DMAC output
 *	RDMA_xxx_I_P	for Audio DMAC peri peri input
 *	RDMA_xxx_O_P	for Audio DMAC peri peri output
 *
 *	ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000               0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

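/*
 * Pick the slave address for one end of the transfer from the table
 * below, indexed by mod type (SRC / SSI / SSIU via BUSIF), stream
 * direction, and how many of SRC/CMD are part of the path.
 */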
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		{{{ 0,				0 },
		  /* Capture */
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } } },
	};

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*->		Audio DMAC
	 * -o->		Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		 | SSI | SRC |
	 *	---------+-----+-----+
	 *	 is_play |  o  |  *  |
	 *	!is_play |  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s[%d])\n",
		rsnd_mod_name(this), rsnd_mod_id(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, " %s[%d]%s\n",
			rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to) ? " to" : "");
	}
}

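/*
 * Create (or reuse) the DMA mod for this stream: Audio DMAC peri peri
 * when both ends of the path are on-chip modules (Gen2 device-to-device
 * transfer), otherwise a DMAEngine backed Audio DMAC, then connect it
 * to the DAI stream.
 */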
int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA failed. try to PIO mode
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 */
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	if (!(*dma_mod)) {
		struct rsnd_dma *dma;

		dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
		if (!dma)
			return -ENOMEM;

		*dma_mod = rsnd_mod_get(dma);

		ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
				    rsnd_mod_get_status, type, dma_id);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
			rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
			rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
			rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));

		ret = attach(io, dma, mod_from, mod_to);
		if (ret < 0)
			return ret;

		dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
		dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
		dma->mod_from = mod_from;
		dma->mod_to   = mod_to;
	}

	ret = rsnd_dai_connect(*dma_mod, io, type);
	if (ret < 0)
		return ret;

	return 0;
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	return 0;
}