// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Based on imx-dma.c
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac {
	struct dma_device engine;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
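/*
 * A DT dma-spec cell carries per-slave CHCFG settings in bits 15..10 (see
 * rz_dmac_chan_filter()); the CHCFG_FILL_* helpers below move each of those
 * bits from its position in the spec word to its position in CHCFG.
 */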
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

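/*
 * rz_lmdesc_setup() chains the DMAC_NR_LMDESC link-mode descriptors through
 * their nxla (next link address) fields and points the last one back at the
 * first, so the hardware follows a circular list. Software tracks its place
 * in the ring with the lmdesc.head and lmdesc.tail pointers of the channel.
 */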
static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	u32 nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}

static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	rz_dmac_set_dmars_register(dmac, channel->index, 0);

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}

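/*
 * rz_dmac_prepare_descs_for_slave_sg() fills in one link-mode descriptor per
 * scatterlist entry. CHCFG_DEM is kept set on all but the last descriptor so
 * that only the final transfer of the chain raises the channel's end
 * interrupt.
 */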
static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	channel->chctrl = CHCTRL_SETEN;
}

static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

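/*
 * Each channel works from a fixed pool of rz_dmac_desc structures that is
 * allocated in rz_dmac_alloc_chan_resources() and only released again in
 * rz_dmac_free_chan_resources(); the ld_free, ld_queue and ld_active lists
 * recycle the descriptors so nothing is allocated on the transfer path.
 */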
static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_get_all_descriptors(&channel->vc, &head);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

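/*
 * The DDS/SDS fields of CHCFG encode the destination/source data size as an
 * index into the power-of-two bus widths below, so the LUT index is used
 * directly as the register field value.
 */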
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->src_per_address = config->src_addr;
	channel->dst_per_address = config->dst_addr;

	val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
	channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);

	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
	channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Place holder
	 * Descriptor allocation is done during alloc_chan_resources and
	 * get freed during free_chan_resources.
	 * list is used to manage the descriptors and avoid any memory
	 * allocation/free during DMA read/write.
	 */
}

static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;
	int ret;

	ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
				100, 100000, false, channel, CHSTAT, 1);
	if (ret < 0)
		dev_warn(dmac->dev, "DMA Timeout");

	rz_dmac_set_dmars_register(dmac, channel->index, 0);
}

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */

static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		goto done;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
done:
	return;
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

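/*
 * Channel requests coming from DT go through rz_dmac_chan_filter(), which
 * takes the MID/RID value from the dma-spec and reserves it in the
 * dmac->modules bitmap so that each request line is handed out only once.
 */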
static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
}

/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	unsigned int i;
	int ret;
	int irq;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmac->ext_base))
		return PTR_ERR(dmac->ext_base);

	/* Register interrupt handler for error */
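	/*
	 * The error interrupt is requested with a NULL dev_id;
	 * rz_dmac_irq_handler() relies on that to tell the controller-level
	 * DMAERR interrupt apart from the per-channel interrupts and returns
	 * IRQ_HANDLED for it without further handling.
	 */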
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
	if (IS_ERR(dmac->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	ret = reset_control_deassert(dmac->rstc);
	if (ret)
		goto err_pm_runtime_put;

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;
	engine->device_synchronize = rz_dmac_device_synchronize;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	channel_num = i ? i - 1 : 0;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	reset_control_assert(dmac->rstc);
err_pm_runtime_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static int rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	dma_async_device_unregister(&dmac->engine);
	of_dma_controller_free(pdev->dev.of_node);
	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");