// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Based on imx-dma.c
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	enum dma_slave_buswidth src_word_size;
	enum dma_slave_buswidth dst_word_size;
	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
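/*
 * The CHCFG_FILL_* helpers below pack the TM/AM/LVL/HIEN hints taken
 * from the DT dma-spec cell and the DDS/SDS data-size encodings from
 * dma_slave_config into their bit positions in the CHCFG register.
 * For example, a 4-byte bus width maps to DS encoding 2, so
 * CHCFG_FILL_SDS(2) sets bits 15:12 of CHCFG to 0x2.
 */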
#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
#define CHCFG_FILL_DDS(a)		(((a) << 16) & CHCFG_FILL_DDS_MASK)
#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
#define CHCFG_FILL_SDS(a)		(((a) << 12) & CHCFG_FILL_SDS_MASK)
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	u32 nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	/* Close the ring: the last descriptor links back to the first */
	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

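	/*
	 * Only (re)start the channel when it is idle: point NXLA at the
	 * first pending link descriptor, restore CHCFG, soft-reset the
	 * channel and then set the enable bits (SETEN, plus STG for
	 * software-triggered memory copies).
	 */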
	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}

static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	rz_dmac_set_dmars_register(dmac, channel->index, 0);

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}

static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			/* Last chunk: clear DEM so the end interrupt is not masked */
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	channel->chctrl = CHCTRL_SETEN;
}

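/*
 * Take the next virtual descriptor off the vchan list and program the
 * hardware for it. Called with the channel's vc.lock held, from both
 * issue_pending and the completion IRQ thread.
 */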
static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

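/*
 * Illustrative consumer-side sketch (not part of this driver): a client
 * obtains a channel through DT, configures it, then submits work via the
 * standard dmaengine API. The names below ("tx", fifo_phys_addr, sgl,
 * sg_len) are hypothetical.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */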
static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i)
		dma_length += sg_dma_len(sg);

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_get_all_descriptors(&channel->vc, &head);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

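/*
 * Map a dma_slave_buswidth value to the DS encoding used in CHCFG
 * (log2 of the width in bytes: 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2,
 * and so on up to 128 bytes -> 7). Unsupported widths return
 * CHCFG_DS_INVALID.
 */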
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->src_per_address = config->src_addr;
	channel->src_word_size = config->src_addr_width;
	channel->dst_per_address = config->dst_addr;
	channel->dst_word_size = config->dst_addr_width;

	val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	/* Clear any previously configured data size before setting it */
	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
	channel->chcfg |= CHCFG_FILL_DDS(val);

	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
	channel->chcfg |= CHCFG_FILL_SDS(val);

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Placeholder: descriptors are allocated in alloc_chan_resources
	 * and freed in free_chan_resources. The ld_* lists manage them so
	 * that no memory is allocated or freed during DMA transfers.
	 */
}

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */

static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		return;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
}

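/*
 * Illustrative DT consumer sketch (node and cell values are
 * hypothetical): the single dma-spec cell carries the peripheral's
 * MID/RID in bits 9:0 and the CHCFG TM/AM/LVL/HIEN hints in bits 15:10,
 * e.g.
 *
 *	uart0: serial@10064000 {
 *		...
 *		dmas = <&dmac 0x2999>, <&dmac 0x299a>;
 *		dma-names = "tx", "rx";
 *	};
 */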
/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	unsigned int i;
	int ret;
	int irq;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmac->ext_base))
		return PTR_ERR(dmac->ext_base);

	/* Register interrupt handler for error */
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

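	/*
	 * From here on, failures must unwind: channels are initialized
	 * first, then the DT provider and the DMA engine are registered,
	 * and the err path below frees the per-channel link descriptor
	 * memory that has been allocated so far.
	 */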
	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	/*
	 * Channels 0..i-1 completed rz_dmac_chan_probe(), so free the
	 * link descriptor memory for exactly those channels.
	 */
	channel_num = i;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	return ret;
}

static int rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	return 0;
}

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");