1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd 4 * Author: Sugar <shuge@allwinnertech.com> 5 * 6 * Copyright (C) 2014 Maxime Ripard 7 * Maxime Ripard <maxime.ripard@free-electrons.com> 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/dmaengine.h> 14 #include <linux/dmapool.h> 15 #include <linux/interrupt.h> 16 #include <linux/module.h> 17 #include <linux/of.h> 18 #include <linux/of_dma.h> 19 #include <linux/platform_device.h> 20 #include <linux/reset.h> 21 #include <linux/slab.h> 22 #include <linux/types.h> 23 24 #include "virt-dma.h" 25 26 /* 27 * Common registers 28 */ 29 #define DMA_IRQ_EN(x) ((x) * 0x04) 30 #define DMA_IRQ_HALF BIT(0) 31 #define DMA_IRQ_PKG BIT(1) 32 #define DMA_IRQ_QUEUE BIT(2) 33 34 #define DMA_IRQ_CHAN_NR 8 35 #define DMA_IRQ_CHAN_WIDTH 4 36 37 38 #define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10) 39 40 #define DMA_STAT 0x30 41 42 /* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */ 43 #define DMA_MAX_CHANNELS (DMA_IRQ_CHAN_NR * 0x10 / 4) 44 45 /* 46 * sun8i specific registers 47 */ 48 #define SUN8I_DMA_GATE 0x20 49 #define SUN8I_DMA_GATE_ENABLE 0x4 50 51 #define SUNXI_H3_SECURE_REG 0x20 52 #define SUNXI_H3_DMA_GATE 0x28 53 #define SUNXI_H3_DMA_GATE_ENABLE 0x4 54 /* 55 * Channels specific registers 56 */ 57 #define DMA_CHAN_ENABLE 0x00 58 #define DMA_CHAN_ENABLE_START BIT(0) 59 #define DMA_CHAN_ENABLE_STOP 0 60 61 #define DMA_CHAN_PAUSE 0x04 62 #define DMA_CHAN_PAUSE_PAUSE BIT(1) 63 #define DMA_CHAN_PAUSE_RESUME 0 64 65 #define DMA_CHAN_LLI_ADDR 0x08 66 67 #define DMA_CHAN_CUR_CFG 0x0c 68 #define DMA_CHAN_MAX_DRQ_A31 0x1f 69 #define DMA_CHAN_MAX_DRQ_H6 0x3f 70 #define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) 71 #define DMA_CHAN_CFG_SRC_DRQ_H6(x) ((x) & DMA_CHAN_MAX_DRQ_H6) 72 #define DMA_CHAN_CFG_SRC_MODE_A31(x) (((x) & 0x1) << 5) 73 #define DMA_CHAN_CFG_SRC_MODE_H6(x) (((x) & 0x1) << 8) 74 #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) 75 #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) 76 #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) 77 78 #define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) 79 #define DMA_CHAN_CFG_DST_DRQ_H6(x) (DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16) 80 #define DMA_CHAN_CFG_DST_MODE_A31(x) (DMA_CHAN_CFG_SRC_MODE_A31(x) << 16) 81 #define DMA_CHAN_CFG_DST_MODE_H6(x) (DMA_CHAN_CFG_SRC_MODE_H6(x) << 16) 82 #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) 83 #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) 84 #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) 85 86 #define DMA_CHAN_CUR_SRC 0x10 87 88 #define DMA_CHAN_CUR_DST 0x14 89 90 #define DMA_CHAN_CUR_CNT 0x18 91 92 #define DMA_CHAN_CUR_PARA 0x1c 93 94 /* 95 * LLI address mangling 96 * 97 * The LLI link physical address is also mangled, but we avoid dealing 98 * with that by allocating LLIs from the DMA32 zone. 99 */ 100 #define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16) 101 #define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18) 102 103 /* 104 * Various hardware related defines 105 */ 106 #define LLI_LAST_ITEM 0xfffff800 107 #define NORMAL_WAIT 8 108 #define DRQ_SDRAM 1 109 #define LINEAR_MODE 0 110 #define IO_MODE 1 111 112 /* forward declaration */ 113 struct sun6i_dma_dev; 114 115 /* 116 * Hardware channels / ports representation 117 * 118 * The hardware is used in several SoCs, with differing numbers 119 * of channels and endpoints. This structure ties those numbers 120 * to a certain compatible string. 121 */ 122 struct sun6i_dma_config { 123 u32 nr_max_channels; 124 u32 nr_max_requests; 125 u32 nr_max_vchans; 126 /* 127 * In the datasheets/user manuals of newer Allwinner SoCs, a special 128 * bit (bit 2 at register 0x20) is present. 129 * It's named "DMA MCLK interface circuit auto gating bit" in the 130 * documents, and the footnote of this register says that this bit 131 * should be set up when initializing the DMA controller. 132 * Allwinner A23/A33 user manuals do not have this bit documented, 133 * however these SoCs really have and need this bit, as seen in the 134 * BSP kernel source code. 135 */ 136 void (*clock_autogate_enable)(struct sun6i_dma_dev *); 137 void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); 138 void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq); 139 void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode); 140 u32 src_burst_lengths; 141 u32 dst_burst_lengths; 142 u32 src_addr_widths; 143 u32 dst_addr_widths; 144 bool has_high_addr; 145 bool has_mbus_clk; 146 }; 147 148 /* 149 * Hardware representation of the LLI 150 * 151 * The hardware will be fed the physical address of this structure, 152 * and read its content in order to start the transfer. 153 */ 154 struct sun6i_dma_lli { 155 u32 cfg; 156 u32 src; 157 u32 dst; 158 u32 len; 159 u32 para; 160 u32 p_lli_next; 161 162 /* 163 * This field is not used by the DMA controller, but will be 164 * used by the CPU to go through the list (mostly for dumping 165 * or freeing it). 166 */ 167 struct sun6i_dma_lli *v_lli_next; 168 }; 169 170 171 struct sun6i_desc { 172 struct virt_dma_desc vd; 173 dma_addr_t p_lli; 174 struct sun6i_dma_lli *v_lli; 175 }; 176 177 struct sun6i_pchan { 178 u32 idx; 179 void __iomem *base; 180 struct sun6i_vchan *vchan; 181 struct sun6i_desc *desc; 182 struct sun6i_desc *done; 183 }; 184 185 struct sun6i_vchan { 186 struct virt_dma_chan vc; 187 struct list_head node; 188 struct dma_slave_config cfg; 189 struct sun6i_pchan *phy; 190 u8 port; 191 u8 irq_type; 192 bool cyclic; 193 }; 194 195 struct sun6i_dma_dev { 196 struct dma_device slave; 197 void __iomem *base; 198 struct clk *clk; 199 struct clk *clk_mbus; 200 int irq; 201 spinlock_t lock; 202 struct reset_control *rstc; 203 struct tasklet_struct task; 204 atomic_t tasklet_shutdown; 205 struct list_head pending; 206 struct dma_pool *pool; 207 struct sun6i_pchan *pchans; 208 struct sun6i_vchan *vchans; 209 const struct sun6i_dma_config *cfg; 210 u32 num_pchans; 211 u32 num_vchans; 212 u32 max_request; 213 }; 214 215 static struct device *chan2dev(struct dma_chan *chan) 216 { 217 return &chan->dev->device; 218 } 219 220 static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d) 221 { 222 return container_of(d, struct sun6i_dma_dev, slave); 223 } 224 225 static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan) 226 { 227 return container_of(chan, struct sun6i_vchan, vc.chan); 228 } 229 230 static inline struct sun6i_desc * 231 to_sun6i_desc(struct dma_async_tx_descriptor *tx) 232 { 233 return container_of(tx, struct sun6i_desc, vd.tx); 234 } 235 236 static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) 237 { 238 dev_dbg(sdev->slave.dev, "Common register:\n" 239 "\tmask0(%04x): 0x%08x\n" 240 "\tmask1(%04x): 0x%08x\n" 241 "\tpend0(%04x): 0x%08x\n" 242 "\tpend1(%04x): 0x%08x\n" 243 "\tstats(%04x): 0x%08x\n", 244 DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)), 245 DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)), 246 DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)), 247 DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)), 248 DMA_STAT, readl(sdev->base + DMA_STAT)); 249 } 250 251 static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, 252 struct sun6i_pchan *pchan) 253 { 254 dev_dbg(sdev->slave.dev, "Chan %d reg:\n" 255 "\t___en(%04x): \t0x%08x\n" 256 "\tpause(%04x): \t0x%08x\n" 257 "\tstart(%04x): \t0x%08x\n" 258 "\t__cfg(%04x): \t0x%08x\n" 259 "\t__src(%04x): \t0x%08x\n" 260 "\t__dst(%04x): \t0x%08x\n" 261 "\tcount(%04x): \t0x%08x\n" 262 "\t_para(%04x): \t0x%08x\n\n", 263 pchan->idx, 264 DMA_CHAN_ENABLE, 265 readl(pchan->base + DMA_CHAN_ENABLE), 266 DMA_CHAN_PAUSE, 267 readl(pchan->base + DMA_CHAN_PAUSE), 268 DMA_CHAN_LLI_ADDR, 269 readl(pchan->base + DMA_CHAN_LLI_ADDR), 270 DMA_CHAN_CUR_CFG, 271 readl(pchan->base + DMA_CHAN_CUR_CFG), 272 DMA_CHAN_CUR_SRC, 273 readl(pchan->base + DMA_CHAN_CUR_SRC), 274 DMA_CHAN_CUR_DST, 275 readl(pchan->base + DMA_CHAN_CUR_DST), 276 DMA_CHAN_CUR_CNT, 277 readl(pchan->base + DMA_CHAN_CUR_CNT), 278 DMA_CHAN_CUR_PARA, 279 readl(pchan->base + DMA_CHAN_CUR_PARA)); 280 } 281 282 static inline s8 convert_burst(u32 maxburst) 283 { 284 switch (maxburst) { 285 case 1: 286 return 0; 287 case 4: 288 return 1; 289 case 8: 290 return 2; 291 case 16: 292 return 3; 293 default: 294 return -EINVAL; 295 } 296 } 297 298 static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) 299 { 300 return ilog2(addr_width); 301 } 302 303 static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev) 304 { 305 writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE); 306 } 307 308 static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev) 309 { 310 writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE); 311 } 312 313 static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst) 314 { 315 *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) | 316 DMA_CHAN_CFG_DST_BURST_A31(dst_burst); 317 } 318 319 static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) 320 { 321 *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) | 322 DMA_CHAN_CFG_DST_BURST_H3(dst_burst); 323 } 324 325 static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) 326 { 327 *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) | 328 DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); 329 } 330 331 static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq) 332 { 333 *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) | 334 DMA_CHAN_CFG_DST_DRQ_H6(dst_drq); 335 } 336 337 static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode) 338 { 339 *p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) | 340 DMA_CHAN_CFG_DST_MODE_A31(dst_mode); 341 } 342 343 static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode) 344 { 345 *p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) | 346 DMA_CHAN_CFG_DST_MODE_H6(dst_mode); 347 } 348 349 static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) 350 { 351 struct sun6i_desc *txd = pchan->desc; 352 struct sun6i_dma_lli *lli; 353 size_t bytes; 354 dma_addr_t pos; 355 356 pos = readl(pchan->base + DMA_CHAN_LLI_ADDR); 357 bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); 358 359 if (pos == LLI_LAST_ITEM) 360 return bytes; 361 362 for (lli = txd->v_lli; lli; lli = lli->v_lli_next) { 363 if (lli->p_lli_next == pos) { 364 for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next) 365 bytes += lli->len; 366 break; 367 } 368 } 369 370 return bytes; 371 } 372 373 static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, 374 struct sun6i_dma_lli *next, 375 dma_addr_t next_phy, 376 struct sun6i_desc *txd) 377 { 378 if ((!prev && !txd) || !next) 379 return NULL; 380 381 if (!prev) { 382 txd->p_lli = next_phy; 383 txd->v_lli = next; 384 } else { 385 prev->p_lli_next = next_phy; 386 prev->v_lli_next = next; 387 } 388 389 next->p_lli_next = LLI_LAST_ITEM; 390 next->v_lli_next = NULL; 391 392 return next; 393 } 394 395 static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, 396 struct sun6i_dma_lli *v_lli, 397 dma_addr_t p_lli) 398 { 399 dev_dbg(chan2dev(&vchan->vc.chan), 400 "\n\tdesc:\tp - %pad v - 0x%p\n" 401 "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" 402 "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", 403 &p_lli, v_lli, 404 v_lli->cfg, v_lli->src, v_lli->dst, 405 v_lli->len, v_lli->para, v_lli->p_lli_next); 406 } 407 408 static void sun6i_dma_free_desc(struct virt_dma_desc *vd) 409 { 410 struct sun6i_desc *txd = to_sun6i_desc(&vd->tx); 411 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device); 412 struct sun6i_dma_lli *v_lli, *v_next; 413 dma_addr_t p_lli, p_next; 414 415 if (unlikely(!txd)) 416 return; 417 418 p_lli = txd->p_lli; 419 v_lli = txd->v_lli; 420 421 while (v_lli) { 422 v_next = v_lli->v_lli_next; 423 p_next = v_lli->p_lli_next; 424 425 dma_pool_free(sdev->pool, v_lli, p_lli); 426 427 v_lli = v_next; 428 p_lli = p_next; 429 } 430 431 kfree(txd); 432 } 433 434 static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) 435 { 436 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); 437 struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc); 438 struct sun6i_pchan *pchan = vchan->phy; 439 u32 irq_val, irq_reg, irq_offset; 440 441 if (!pchan) 442 return -EAGAIN; 443 444 if (!desc) { 445 pchan->desc = NULL; 446 pchan->done = NULL; 447 return -EAGAIN; 448 } 449 450 list_del(&desc->node); 451 452 pchan->desc = to_sun6i_desc(&desc->tx); 453 pchan->done = NULL; 454 455 sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli); 456 457 irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; 458 irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; 459 460 vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE; 461 462 irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg)); 463 irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) << 464 (irq_offset * DMA_IRQ_CHAN_WIDTH)); 465 irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH); 466 writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg)); 467 468 writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); 469 writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); 470 471 sun6i_dma_dump_com_regs(sdev); 472 sun6i_dma_dump_chan_regs(sdev, pchan); 473 474 return 0; 475 } 476 477 static void sun6i_dma_tasklet(struct tasklet_struct *t) 478 { 479 struct sun6i_dma_dev *sdev = from_tasklet(sdev, t, task); 480 struct sun6i_vchan *vchan; 481 struct sun6i_pchan *pchan; 482 unsigned int pchan_alloc = 0; 483 unsigned int pchan_idx; 484 485 list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) { 486 spin_lock_irq(&vchan->vc.lock); 487 488 pchan = vchan->phy; 489 490 if (pchan && pchan->done) { 491 if (sun6i_dma_start_desc(vchan)) { 492 /* 493 * No current txd associated with this channel 494 */ 495 dev_dbg(sdev->slave.dev, "pchan %u: free\n", 496 pchan->idx); 497 498 /* Mark this channel free */ 499 vchan->phy = NULL; 500 pchan->vchan = NULL; 501 } 502 } 503 spin_unlock_irq(&vchan->vc.lock); 504 } 505 506 spin_lock_irq(&sdev->lock); 507 for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) { 508 pchan = &sdev->pchans[pchan_idx]; 509 510 if (pchan->vchan || list_empty(&sdev->pending)) 511 continue; 512 513 vchan = list_first_entry(&sdev->pending, 514 struct sun6i_vchan, node); 515 516 /* Remove from pending channels */ 517 list_del_init(&vchan->node); 518 pchan_alloc |= BIT(pchan_idx); 519 520 /* Mark this channel allocated */ 521 pchan->vchan = vchan; 522 vchan->phy = pchan; 523 dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n", 524 pchan->idx, &vchan->vc); 525 } 526 spin_unlock_irq(&sdev->lock); 527 528 for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) { 529 if (!(pchan_alloc & BIT(pchan_idx))) 530 continue; 531 532 pchan = sdev->pchans + pchan_idx; 533 vchan = pchan->vchan; 534 if (vchan) { 535 spin_lock_irq(&vchan->vc.lock); 536 sun6i_dma_start_desc(vchan); 537 spin_unlock_irq(&vchan->vc.lock); 538 } 539 } 540 } 541 542 static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) 543 { 544 struct sun6i_dma_dev *sdev = dev_id; 545 struct sun6i_vchan *vchan; 546 struct sun6i_pchan *pchan; 547 int i, j, ret = IRQ_NONE; 548 u32 status; 549 550 for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) { 551 status = readl(sdev->base + DMA_IRQ_STAT(i)); 552 if (!status) 553 continue; 554 555 dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n", 556 i ? "high" : "low", status); 557 558 writel(status, sdev->base + DMA_IRQ_STAT(i)); 559 560 for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) { 561 pchan = sdev->pchans + j; 562 vchan = pchan->vchan; 563 if (vchan && (status & vchan->irq_type)) { 564 if (vchan->cyclic) { 565 vchan_cyclic_callback(&pchan->desc->vd); 566 } else { 567 spin_lock(&vchan->vc.lock); 568 vchan_cookie_complete(&pchan->desc->vd); 569 pchan->done = pchan->desc; 570 spin_unlock(&vchan->vc.lock); 571 } 572 } 573 574 status = status >> DMA_IRQ_CHAN_WIDTH; 575 } 576 577 if (!atomic_read(&sdev->tasklet_shutdown)) 578 tasklet_schedule(&sdev->task); 579 ret = IRQ_HANDLED; 580 } 581 582 return ret; 583 } 584 585 static int set_config(struct sun6i_dma_dev *sdev, 586 struct dma_slave_config *sconfig, 587 enum dma_transfer_direction direction, 588 u32 *p_cfg) 589 { 590 enum dma_slave_buswidth src_addr_width, dst_addr_width; 591 u32 src_maxburst, dst_maxburst; 592 s8 src_width, dst_width, src_burst, dst_burst; 593 594 src_addr_width = sconfig->src_addr_width; 595 dst_addr_width = sconfig->dst_addr_width; 596 src_maxburst = sconfig->src_maxburst; 597 dst_maxburst = sconfig->dst_maxburst; 598 599 switch (direction) { 600 case DMA_MEM_TO_DEV: 601 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 602 src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 603 src_maxburst = src_maxburst ? src_maxburst : 8; 604 break; 605 case DMA_DEV_TO_MEM: 606 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 607 dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 608 dst_maxburst = dst_maxburst ? dst_maxburst : 8; 609 break; 610 default: 611 return -EINVAL; 612 } 613 614 if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths)) 615 return -EINVAL; 616 if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths)) 617 return -EINVAL; 618 if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths)) 619 return -EINVAL; 620 if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths)) 621 return -EINVAL; 622 623 src_width = convert_buswidth(src_addr_width); 624 dst_width = convert_buswidth(dst_addr_width); 625 dst_burst = convert_burst(dst_maxburst); 626 src_burst = convert_burst(src_maxburst); 627 628 *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) | 629 DMA_CHAN_CFG_DST_WIDTH(dst_width); 630 631 sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst); 632 633 return 0; 634 } 635 636 static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev, 637 struct sun6i_dma_lli *v_lli, 638 dma_addr_t src, dma_addr_t dst) 639 { 640 v_lli->src = lower_32_bits(src); 641 v_lli->dst = lower_32_bits(dst); 642 643 if (sdev->cfg->has_high_addr) 644 v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) | 645 DST_HIGH_ADDR(upper_32_bits(dst)); 646 } 647 648 static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( 649 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 650 size_t len, unsigned long flags) 651 { 652 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 653 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 654 struct sun6i_dma_lli *v_lli; 655 struct sun6i_desc *txd; 656 dma_addr_t p_lli; 657 s8 burst, width; 658 659 dev_dbg(chan2dev(chan), 660 "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", 661 __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags); 662 663 if (!len) 664 return NULL; 665 666 txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 667 if (!txd) 668 return NULL; 669 670 v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); 671 if (!v_lli) { 672 dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); 673 goto err_txd_free; 674 } 675 676 v_lli->len = len; 677 v_lli->para = NORMAL_WAIT; 678 sun6i_dma_set_addr(sdev, v_lli, src, dest); 679 680 burst = convert_burst(8); 681 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); 682 v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) | 683 DMA_CHAN_CFG_DST_WIDTH(width); 684 685 sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); 686 sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM); 687 sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE); 688 689 sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); 690 691 sun6i_dma_dump_lli(vchan, v_lli, p_lli); 692 693 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 694 695 err_txd_free: 696 kfree(txd); 697 return NULL; 698 } 699 700 static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( 701 struct dma_chan *chan, struct scatterlist *sgl, 702 unsigned int sg_len, enum dma_transfer_direction dir, 703 unsigned long flags, void *context) 704 { 705 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 706 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 707 struct dma_slave_config *sconfig = &vchan->cfg; 708 struct sun6i_dma_lli *v_lli, *prev = NULL; 709 struct sun6i_desc *txd; 710 struct scatterlist *sg; 711 dma_addr_t p_lli; 712 u32 lli_cfg; 713 int i, ret; 714 715 if (!sgl) 716 return NULL; 717 718 ret = set_config(sdev, sconfig, dir, &lli_cfg); 719 if (ret) { 720 dev_err(chan2dev(chan), "Invalid DMA configuration\n"); 721 return NULL; 722 } 723 724 txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 725 if (!txd) 726 return NULL; 727 728 for_each_sg(sgl, sg, sg_len, i) { 729 v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); 730 if (!v_lli) 731 goto err_lli_free; 732 733 v_lli->len = sg_dma_len(sg); 734 v_lli->para = NORMAL_WAIT; 735 736 if (dir == DMA_MEM_TO_DEV) { 737 sun6i_dma_set_addr(sdev, v_lli, 738 sg_dma_address(sg), 739 sconfig->dst_addr); 740 v_lli->cfg = lli_cfg; 741 sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 742 sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); 743 744 dev_dbg(chan2dev(chan), 745 "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", 746 __func__, vchan->vc.chan.chan_id, 747 &sconfig->dst_addr, &sg_dma_address(sg), 748 sg_dma_len(sg), flags); 749 750 } else { 751 sun6i_dma_set_addr(sdev, v_lli, 752 sconfig->src_addr, 753 sg_dma_address(sg)); 754 v_lli->cfg = lli_cfg; 755 sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); 756 sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); 757 758 dev_dbg(chan2dev(chan), 759 "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", 760 __func__, vchan->vc.chan.chan_id, 761 &sg_dma_address(sg), &sconfig->src_addr, 762 sg_dma_len(sg), flags); 763 } 764 765 prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); 766 } 767 768 dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); 769 for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; 770 p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) 771 sun6i_dma_dump_lli(vchan, v_lli, p_lli); 772 773 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 774 775 err_lli_free: 776 for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; 777 p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) 778 dma_pool_free(sdev->pool, v_lli, p_lli); 779 kfree(txd); 780 return NULL; 781 } 782 783 static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( 784 struct dma_chan *chan, 785 dma_addr_t buf_addr, 786 size_t buf_len, 787 size_t period_len, 788 enum dma_transfer_direction dir, 789 unsigned long flags) 790 { 791 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 792 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 793 struct dma_slave_config *sconfig = &vchan->cfg; 794 struct sun6i_dma_lli *v_lli, *prev = NULL; 795 struct sun6i_desc *txd; 796 dma_addr_t p_lli; 797 u32 lli_cfg; 798 unsigned int i, periods = buf_len / period_len; 799 int ret; 800 801 ret = set_config(sdev, sconfig, dir, &lli_cfg); 802 if (ret) { 803 dev_err(chan2dev(chan), "Invalid DMA configuration\n"); 804 return NULL; 805 } 806 807 txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 808 if (!txd) 809 return NULL; 810 811 for (i = 0; i < periods; i++) { 812 v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); 813 if (!v_lli) { 814 dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); 815 goto err_lli_free; 816 } 817 818 v_lli->len = period_len; 819 v_lli->para = NORMAL_WAIT; 820 821 if (dir == DMA_MEM_TO_DEV) { 822 sun6i_dma_set_addr(sdev, v_lli, 823 buf_addr + period_len * i, 824 sconfig->dst_addr); 825 v_lli->cfg = lli_cfg; 826 sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 827 sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); 828 } else { 829 sun6i_dma_set_addr(sdev, v_lli, 830 sconfig->src_addr, 831 buf_addr + period_len * i); 832 v_lli->cfg = lli_cfg; 833 sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); 834 sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); 835 } 836 837 prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); 838 } 839 840 prev->p_lli_next = txd->p_lli; /* cyclic list */ 841 842 vchan->cyclic = true; 843 844 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 845 846 err_lli_free: 847 for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; 848 p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) 849 dma_pool_free(sdev->pool, v_lli, p_lli); 850 kfree(txd); 851 return NULL; 852 } 853 854 static int sun6i_dma_config(struct dma_chan *chan, 855 struct dma_slave_config *config) 856 { 857 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 858 859 memcpy(&vchan->cfg, config, sizeof(*config)); 860 861 return 0; 862 } 863 864 static int sun6i_dma_pause(struct dma_chan *chan) 865 { 866 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 867 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 868 struct sun6i_pchan *pchan = vchan->phy; 869 870 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); 871 872 if (pchan) { 873 writel(DMA_CHAN_PAUSE_PAUSE, 874 pchan->base + DMA_CHAN_PAUSE); 875 } else { 876 spin_lock(&sdev->lock); 877 list_del_init(&vchan->node); 878 spin_unlock(&sdev->lock); 879 } 880 881 return 0; 882 } 883 884 static int sun6i_dma_resume(struct dma_chan *chan) 885 { 886 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 887 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 888 struct sun6i_pchan *pchan = vchan->phy; 889 unsigned long flags; 890 891 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); 892 893 spin_lock_irqsave(&vchan->vc.lock, flags); 894 895 if (pchan) { 896 writel(DMA_CHAN_PAUSE_RESUME, 897 pchan->base + DMA_CHAN_PAUSE); 898 } else if (!list_empty(&vchan->vc.desc_issued)) { 899 spin_lock(&sdev->lock); 900 list_add_tail(&vchan->node, &sdev->pending); 901 spin_unlock(&sdev->lock); 902 } 903 904 spin_unlock_irqrestore(&vchan->vc.lock, flags); 905 906 return 0; 907 } 908 909 static int sun6i_dma_terminate_all(struct dma_chan *chan) 910 { 911 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 912 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 913 struct sun6i_pchan *pchan = vchan->phy; 914 unsigned long flags; 915 LIST_HEAD(head); 916 917 spin_lock(&sdev->lock); 918 list_del_init(&vchan->node); 919 spin_unlock(&sdev->lock); 920 921 spin_lock_irqsave(&vchan->vc.lock, flags); 922 923 if (vchan->cyclic) { 924 vchan->cyclic = false; 925 if (pchan && pchan->desc) { 926 struct virt_dma_desc *vd = &pchan->desc->vd; 927 struct virt_dma_chan *vc = &vchan->vc; 928 929 list_add_tail(&vd->node, &vc->desc_completed); 930 } 931 } 932 933 vchan_get_all_descriptors(&vchan->vc, &head); 934 935 if (pchan) { 936 writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); 937 writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); 938 939 vchan->phy = NULL; 940 pchan->vchan = NULL; 941 pchan->desc = NULL; 942 pchan->done = NULL; 943 } 944 945 spin_unlock_irqrestore(&vchan->vc.lock, flags); 946 947 vchan_dma_desc_free_list(&vchan->vc, &head); 948 949 return 0; 950 } 951 952 static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, 953 dma_cookie_t cookie, 954 struct dma_tx_state *state) 955 { 956 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 957 struct sun6i_pchan *pchan = vchan->phy; 958 struct sun6i_dma_lli *lli; 959 struct virt_dma_desc *vd; 960 struct sun6i_desc *txd; 961 enum dma_status ret; 962 unsigned long flags; 963 size_t bytes = 0; 964 965 ret = dma_cookie_status(chan, cookie, state); 966 if (ret == DMA_COMPLETE || !state) 967 return ret; 968 969 spin_lock_irqsave(&vchan->vc.lock, flags); 970 971 vd = vchan_find_desc(&vchan->vc, cookie); 972 txd = to_sun6i_desc(&vd->tx); 973 974 if (vd) { 975 for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next) 976 bytes += lli->len; 977 } else if (!pchan || !pchan->desc) { 978 bytes = 0; 979 } else { 980 bytes = sun6i_get_chan_size(pchan); 981 } 982 983 spin_unlock_irqrestore(&vchan->vc.lock, flags); 984 985 dma_set_residue(state, bytes); 986 987 return ret; 988 } 989 990 static void sun6i_dma_issue_pending(struct dma_chan *chan) 991 { 992 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 993 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 994 unsigned long flags; 995 996 spin_lock_irqsave(&vchan->vc.lock, flags); 997 998 if (vchan_issue_pending(&vchan->vc)) { 999 spin_lock(&sdev->lock); 1000 1001 if (!vchan->phy && list_empty(&vchan->node)) { 1002 list_add_tail(&vchan->node, &sdev->pending); 1003 tasklet_schedule(&sdev->task); 1004 dev_dbg(chan2dev(chan), "vchan %p: issued\n", 1005 &vchan->vc); 1006 } 1007 1008 spin_unlock(&sdev->lock); 1009 } else { 1010 dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n", 1011 &vchan->vc); 1012 } 1013 1014 spin_unlock_irqrestore(&vchan->vc.lock, flags); 1015 } 1016 1017 static void sun6i_dma_free_chan_resources(struct dma_chan *chan) 1018 { 1019 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 1020 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 1021 unsigned long flags; 1022 1023 spin_lock_irqsave(&sdev->lock, flags); 1024 list_del_init(&vchan->node); 1025 spin_unlock_irqrestore(&sdev->lock, flags); 1026 1027 vchan_free_chan_resources(&vchan->vc); 1028 } 1029 1030 static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, 1031 struct of_dma *ofdma) 1032 { 1033 struct sun6i_dma_dev *sdev = ofdma->of_dma_data; 1034 struct sun6i_vchan *vchan; 1035 struct dma_chan *chan; 1036 u8 port = dma_spec->args[0]; 1037 1038 if (port > sdev->max_request) 1039 return NULL; 1040 1041 chan = dma_get_any_slave_channel(&sdev->slave); 1042 if (!chan) 1043 return NULL; 1044 1045 vchan = to_sun6i_vchan(chan); 1046 vchan->port = port; 1047 1048 return chan; 1049 } 1050 1051 static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev) 1052 { 1053 /* Disable all interrupts from DMA */ 1054 writel(0, sdev->base + DMA_IRQ_EN(0)); 1055 writel(0, sdev->base + DMA_IRQ_EN(1)); 1056 1057 /* Prevent spurious interrupts from scheduling the tasklet */ 1058 atomic_inc(&sdev->tasklet_shutdown); 1059 1060 /* Make sure we won't have any further interrupts */ 1061 devm_free_irq(sdev->slave.dev, sdev->irq, sdev); 1062 1063 /* Actually prevent the tasklet from being scheduled */ 1064 tasklet_kill(&sdev->task); 1065 } 1066 1067 static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) 1068 { 1069 int i; 1070 1071 for (i = 0; i < sdev->num_vchans; i++) { 1072 struct sun6i_vchan *vchan = &sdev->vchans[i]; 1073 1074 list_del(&vchan->vc.chan.device_node); 1075 tasklet_kill(&vchan->vc.task); 1076 } 1077 } 1078 1079 /* 1080 * For A31: 1081 * 1082 * There's 16 physical channels that can work in parallel. 1083 * 1084 * However we have 30 different endpoints for our requests. 1085 * 1086 * Since the channels are able to handle only an unidirectional 1087 * transfer, we need to allocate more virtual channels so that 1088 * everyone can grab one channel. 1089 * 1090 * Some devices can't work in both direction (mostly because it 1091 * wouldn't make sense), so we have a bit fewer virtual channels than 1092 * 2 channels per endpoints. 1093 */ 1094 1095 static struct sun6i_dma_config sun6i_a31_dma_cfg = { 1096 .nr_max_channels = 16, 1097 .nr_max_requests = 30, 1098 .nr_max_vchans = 53, 1099 .set_burst_length = sun6i_set_burst_length_a31, 1100 .set_drq = sun6i_set_drq_a31, 1101 .set_mode = sun6i_set_mode_a31, 1102 .src_burst_lengths = BIT(1) | BIT(8), 1103 .dst_burst_lengths = BIT(1) | BIT(8), 1104 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1105 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1106 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1107 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1108 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1109 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1110 }; 1111 1112 /* 1113 * The A23 only has 8 physical channels, a maximum DRQ port id of 24, 1114 * and a total of 37 usable source and destination endpoints. 1115 */ 1116 1117 static struct sun6i_dma_config sun8i_a23_dma_cfg = { 1118 .nr_max_channels = 8, 1119 .nr_max_requests = 24, 1120 .nr_max_vchans = 37, 1121 .clock_autogate_enable = sun6i_enable_clock_autogate_a23, 1122 .set_burst_length = sun6i_set_burst_length_a31, 1123 .set_drq = sun6i_set_drq_a31, 1124 .set_mode = sun6i_set_mode_a31, 1125 .src_burst_lengths = BIT(1) | BIT(8), 1126 .dst_burst_lengths = BIT(1) | BIT(8), 1127 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1128 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1129 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1130 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1131 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1132 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1133 }; 1134 1135 static struct sun6i_dma_config sun8i_a83t_dma_cfg = { 1136 .nr_max_channels = 8, 1137 .nr_max_requests = 28, 1138 .nr_max_vchans = 39, 1139 .clock_autogate_enable = sun6i_enable_clock_autogate_a23, 1140 .set_burst_length = sun6i_set_burst_length_a31, 1141 .set_drq = sun6i_set_drq_a31, 1142 .set_mode = sun6i_set_mode_a31, 1143 .src_burst_lengths = BIT(1) | BIT(8), 1144 .dst_burst_lengths = BIT(1) | BIT(8), 1145 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1146 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1147 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1148 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1149 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1150 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1151 }; 1152 1153 /* 1154 * The H3 has 12 physical channels, a maximum DRQ port id of 27, 1155 * and a total of 34 usable source and destination endpoints. 1156 * It also supports additional burst lengths and bus widths, 1157 * and the burst length fields have different offsets. 1158 */ 1159 1160 static struct sun6i_dma_config sun8i_h3_dma_cfg = { 1161 .nr_max_channels = 12, 1162 .nr_max_requests = 27, 1163 .nr_max_vchans = 34, 1164 .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1165 .set_burst_length = sun6i_set_burst_length_h3, 1166 .set_drq = sun6i_set_drq_a31, 1167 .set_mode = sun6i_set_mode_a31, 1168 .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1169 .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1170 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1171 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1172 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1173 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1174 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1175 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1176 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1177 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1178 }; 1179 1180 /* 1181 * The A64 binding uses the number of dma channels from the 1182 * device tree node. 1183 */ 1184 static struct sun6i_dma_config sun50i_a64_dma_cfg = { 1185 .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1186 .set_burst_length = sun6i_set_burst_length_h3, 1187 .set_drq = sun6i_set_drq_a31, 1188 .set_mode = sun6i_set_mode_a31, 1189 .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1190 .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1191 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1192 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1193 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1194 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1195 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1196 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1197 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1198 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1199 }; 1200 1201 /* 1202 * The A100 binding uses the number of dma channels from the 1203 * device tree node. 1204 */ 1205 static struct sun6i_dma_config sun50i_a100_dma_cfg = { 1206 .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1207 .set_burst_length = sun6i_set_burst_length_h3, 1208 .set_drq = sun6i_set_drq_h6, 1209 .set_mode = sun6i_set_mode_h6, 1210 .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1211 .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1212 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1213 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1214 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1215 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1216 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1217 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1218 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1219 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1220 .has_high_addr = true, 1221 .has_mbus_clk = true, 1222 }; 1223 1224 /* 1225 * The H6 binding uses the number of dma channels from the 1226 * device tree node. 1227 */ 1228 static struct sun6i_dma_config sun50i_h6_dma_cfg = { 1229 .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1230 .set_burst_length = sun6i_set_burst_length_h3, 1231 .set_drq = sun6i_set_drq_h6, 1232 .set_mode = sun6i_set_mode_h6, 1233 .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1234 .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1235 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1236 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1237 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1238 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1239 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1240 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1241 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1242 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1243 .has_mbus_clk = true, 1244 }; 1245 1246 /* 1247 * The V3s have only 8 physical channels, a maximum DRQ port id of 23, 1248 * and a total of 24 usable source and destination endpoints. 1249 */ 1250 1251 static struct sun6i_dma_config sun8i_v3s_dma_cfg = { 1252 .nr_max_channels = 8, 1253 .nr_max_requests = 23, 1254 .nr_max_vchans = 24, 1255 .clock_autogate_enable = sun6i_enable_clock_autogate_a23, 1256 .set_burst_length = sun6i_set_burst_length_a31, 1257 .set_drq = sun6i_set_drq_a31, 1258 .set_mode = sun6i_set_mode_a31, 1259 .src_burst_lengths = BIT(1) | BIT(8), 1260 .dst_burst_lengths = BIT(1) | BIT(8), 1261 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1262 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1263 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1264 .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1265 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1266 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), 1267 }; 1268 1269 static const struct of_device_id sun6i_dma_match[] = { 1270 { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, 1271 { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, 1272 { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, 1273 { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, 1274 { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, 1275 { .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg }, 1276 { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, 1277 { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg }, 1278 { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, 1279 { /* sentinel */ } 1280 }; 1281 MODULE_DEVICE_TABLE(of, sun6i_dma_match); 1282 1283 static int sun6i_dma_probe(struct platform_device *pdev) 1284 { 1285 struct device_node *np = pdev->dev.of_node; 1286 struct sun6i_dma_dev *sdc; 1287 int ret, i; 1288 1289 sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); 1290 if (!sdc) 1291 return -ENOMEM; 1292 1293 sdc->cfg = of_device_get_match_data(&pdev->dev); 1294 if (!sdc->cfg) 1295 return -ENODEV; 1296 1297 sdc->base = devm_platform_ioremap_resource(pdev, 0); 1298 if (IS_ERR(sdc->base)) 1299 return PTR_ERR(sdc->base); 1300 1301 sdc->irq = platform_get_irq(pdev, 0); 1302 if (sdc->irq < 0) 1303 return sdc->irq; 1304 1305 sdc->clk = devm_clk_get(&pdev->dev, NULL); 1306 if (IS_ERR(sdc->clk)) { 1307 dev_err(&pdev->dev, "No clock specified\n"); 1308 return PTR_ERR(sdc->clk); 1309 } 1310 1311 if (sdc->cfg->has_mbus_clk) { 1312 sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus"); 1313 if (IS_ERR(sdc->clk_mbus)) { 1314 dev_err(&pdev->dev, "No mbus clock specified\n"); 1315 return PTR_ERR(sdc->clk_mbus); 1316 } 1317 } 1318 1319 sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); 1320 if (IS_ERR(sdc->rstc)) { 1321 dev_err(&pdev->dev, "No reset controller specified\n"); 1322 return PTR_ERR(sdc->rstc); 1323 } 1324 1325 sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, 1326 sizeof(struct sun6i_dma_lli), 4, 0); 1327 if (!sdc->pool) { 1328 dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); 1329 return -ENOMEM; 1330 } 1331 1332 platform_set_drvdata(pdev, sdc); 1333 INIT_LIST_HEAD(&sdc->pending); 1334 spin_lock_init(&sdc->lock); 1335 1336 dma_set_max_seg_size(&pdev->dev, SZ_32M - 1); 1337 1338 dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); 1339 dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); 1340 dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); 1341 dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask); 1342 1343 INIT_LIST_HEAD(&sdc->slave.channels); 1344 sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; 1345 sdc->slave.device_tx_status = sun6i_dma_tx_status; 1346 sdc->slave.device_issue_pending = sun6i_dma_issue_pending; 1347 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; 1348 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; 1349 sdc->slave.device_prep_dma_cyclic = sun6i_dma_prep_dma_cyclic; 1350 sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES; 1351 sdc->slave.device_config = sun6i_dma_config; 1352 sdc->slave.device_pause = sun6i_dma_pause; 1353 sdc->slave.device_resume = sun6i_dma_resume; 1354 sdc->slave.device_terminate_all = sun6i_dma_terminate_all; 1355 sdc->slave.src_addr_widths = sdc->cfg->src_addr_widths; 1356 sdc->slave.dst_addr_widths = sdc->cfg->dst_addr_widths; 1357 sdc->slave.directions = BIT(DMA_DEV_TO_MEM) | 1358 BIT(DMA_MEM_TO_DEV); 1359 sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1360 sdc->slave.dev = &pdev->dev; 1361 1362 sdc->num_pchans = sdc->cfg->nr_max_channels; 1363 sdc->num_vchans = sdc->cfg->nr_max_vchans; 1364 sdc->max_request = sdc->cfg->nr_max_requests; 1365 1366 ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans); 1367 if (ret && !sdc->num_pchans) { 1368 dev_err(&pdev->dev, "Can't get dma-channels.\n"); 1369 return ret; 1370 } 1371 1372 ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); 1373 if (ret && !sdc->max_request) { 1374 dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", 1375 DMA_CHAN_MAX_DRQ_A31); 1376 sdc->max_request = DMA_CHAN_MAX_DRQ_A31; 1377 } 1378 1379 /* 1380 * If the number of vchans is not specified, derive it from the 1381 * highest port number, at most one channel per port and direction. 1382 */ 1383 if (!sdc->num_vchans) 1384 sdc->num_vchans = 2 * (sdc->max_request + 1); 1385 1386 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans, 1387 sizeof(struct sun6i_pchan), GFP_KERNEL); 1388 if (!sdc->pchans) 1389 return -ENOMEM; 1390 1391 sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans, 1392 sizeof(struct sun6i_vchan), GFP_KERNEL); 1393 if (!sdc->vchans) 1394 return -ENOMEM; 1395 1396 tasklet_setup(&sdc->task, sun6i_dma_tasklet); 1397 1398 for (i = 0; i < sdc->num_pchans; i++) { 1399 struct sun6i_pchan *pchan = &sdc->pchans[i]; 1400 1401 pchan->idx = i; 1402 pchan->base = sdc->base + 0x100 + i * 0x40; 1403 } 1404 1405 for (i = 0; i < sdc->num_vchans; i++) { 1406 struct sun6i_vchan *vchan = &sdc->vchans[i]; 1407 1408 INIT_LIST_HEAD(&vchan->node); 1409 vchan->vc.desc_free = sun6i_dma_free_desc; 1410 vchan_init(&vchan->vc, &sdc->slave); 1411 } 1412 1413 ret = reset_control_deassert(sdc->rstc); 1414 if (ret) { 1415 dev_err(&pdev->dev, "Couldn't deassert the device from reset\n"); 1416 goto err_chan_free; 1417 } 1418 1419 ret = clk_prepare_enable(sdc->clk); 1420 if (ret) { 1421 dev_err(&pdev->dev, "Couldn't enable the clock\n"); 1422 goto err_reset_assert; 1423 } 1424 1425 if (sdc->cfg->has_mbus_clk) { 1426 ret = clk_prepare_enable(sdc->clk_mbus); 1427 if (ret) { 1428 dev_err(&pdev->dev, "Couldn't enable mbus clock\n"); 1429 goto err_clk_disable; 1430 } 1431 } 1432 1433 ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, 1434 dev_name(&pdev->dev), sdc); 1435 if (ret) { 1436 dev_err(&pdev->dev, "Cannot request IRQ\n"); 1437 goto err_mbus_clk_disable; 1438 } 1439 1440 ret = dma_async_device_register(&sdc->slave); 1441 if (ret) { 1442 dev_warn(&pdev->dev, "Failed to register DMA engine device\n"); 1443 goto err_irq_disable; 1444 } 1445 1446 ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate, 1447 sdc); 1448 if (ret) { 1449 dev_err(&pdev->dev, "of_dma_controller_register failed\n"); 1450 goto err_dma_unregister; 1451 } 1452 1453 if (sdc->cfg->clock_autogate_enable) 1454 sdc->cfg->clock_autogate_enable(sdc); 1455 1456 return 0; 1457 1458 err_dma_unregister: 1459 dma_async_device_unregister(&sdc->slave); 1460 err_irq_disable: 1461 sun6i_kill_tasklet(sdc); 1462 err_mbus_clk_disable: 1463 clk_disable_unprepare(sdc->clk_mbus); 1464 err_clk_disable: 1465 clk_disable_unprepare(sdc->clk); 1466 err_reset_assert: 1467 reset_control_assert(sdc->rstc); 1468 err_chan_free: 1469 sun6i_dma_free(sdc); 1470 return ret; 1471 } 1472 1473 static int sun6i_dma_remove(struct platform_device *pdev) 1474 { 1475 struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev); 1476 1477 of_dma_controller_free(pdev->dev.of_node); 1478 dma_async_device_unregister(&sdc->slave); 1479 1480 sun6i_kill_tasklet(sdc); 1481 1482 clk_disable_unprepare(sdc->clk_mbus); 1483 clk_disable_unprepare(sdc->clk); 1484 reset_control_assert(sdc->rstc); 1485 1486 sun6i_dma_free(sdc); 1487 1488 return 0; 1489 } 1490 1491 static struct platform_driver sun6i_dma_driver = { 1492 .probe = sun6i_dma_probe, 1493 .remove = sun6i_dma_remove, 1494 .driver = { 1495 .name = "sun6i-dma", 1496 .of_match_table = sun6i_dma_match, 1497 }, 1498 }; 1499 module_platform_driver(sun6i_dma_driver); 1500 1501 MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver"); 1502 MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>"); 1503 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); 1504 MODULE_LICENSE("GPL"); 1505