/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR
BIT(7) 86 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) 87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) 88 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) 89 #define XILINX_DMA_DMASR_IDLE BIT(1) 90 #define XILINX_DMA_DMASR_HALTED BIT(0) 91 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) 92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) 93 94 #define XILINX_DMA_REG_CURDESC 0x0008 95 #define XILINX_DMA_REG_TAILDESC 0x0010 96 #define XILINX_DMA_REG_REG_INDEX 0x0014 97 #define XILINX_DMA_REG_FRMSTORE 0x0018 98 #define XILINX_DMA_REG_THRESHOLD 0x001c 99 #define XILINX_DMA_REG_FRMPTR_STS 0x0024 100 #define XILINX_DMA_REG_PARK_PTR 0x0028 101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 102 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 103 #define XILINX_DMA_REG_VDMA_VERSION 0x002c 104 105 /* Register Direct Mode Registers */ 106 #define XILINX_DMA_REG_VSIZE 0x0000 107 #define XILINX_DMA_REG_HSIZE 0x0004 108 109 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 110 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 111 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 112 113 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 114 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 115 116 /* HW specific definitions */ 117 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 118 119 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 120 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ 121 XILINX_DMA_DMASR_DLY_CNT_IRQ | \ 122 XILINX_DMA_DMASR_ERR_IRQ) 123 124 #define XILINX_DMA_DMASR_ALL_ERR_MASK \ 125 (XILINX_DMA_DMASR_EOL_LATE_ERR | \ 126 XILINX_DMA_DMASR_SOF_LATE_ERR | \ 127 XILINX_DMA_DMASR_SG_DEC_ERR | \ 128 XILINX_DMA_DMASR_SG_SLV_ERR | \ 129 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 130 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 131 XILINX_DMA_DMASR_DMA_DEC_ERR | \ 132 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ 133 XILINX_DMA_DMASR_DMA_INT_ERR) 134 135 /* 136 * Recoverable errors are DMA Internal error, SOF Early, EOF Early 137 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC 138 * is enabled in the h/w system. 
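 * (When flush on fsync is enabled the channel keeps running: the partial
 * frame is discarded and transfers resynchronize on the next frame sync.
 * Otherwise the interrupt handler flags the channel as errored.)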
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @tdest: TDEST value for the channel (multi-channel DMA)
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
};

/**
 * struct xilinx_dma_config - DMA IP configuration
 * @dmatype: DMA IP type
 * @clk_init: DMA IP specific clock initialization routine
 */
struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

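/*
 * Note on these I/O accessors: dma_read()/dma_write() take an absolute
 * register offset from the device base, while the dma_ctrl_*() and
 * vdma_desc_write*() variants first add the per-channel ctrl_offset or
 * desc_offset, so the same XILINX_DMA_REG_* constants can be used for
 * both the MM2S and S2MM register banks.
 */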
429 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) 430 { 431 iowrite32(value, chan->xdev->regs + reg); 432 } 433 434 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, 435 u32 value) 436 { 437 dma_write(chan, chan->desc_offset + reg, value); 438 } 439 440 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) 441 { 442 return dma_read(chan, chan->ctrl_offset + reg); 443 } 444 445 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, 446 u32 value) 447 { 448 dma_write(chan, chan->ctrl_offset + reg, value); 449 } 450 451 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, 452 u32 clr) 453 { 454 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); 455 } 456 457 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, 458 u32 set) 459 { 460 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); 461 } 462 463 /** 464 * vdma_desc_write_64 - 64-bit descriptor write 465 * @chan: Driver specific VDMA channel 466 * @reg: Register to write 467 * @value_lsb: lower address of the descriptor. 468 * @value_msb: upper address of the descriptor. 469 * 470 * Since vdma driver is trying to write to a register offset which is not a 471 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits 472 * instead of a single 64 bit register write. 473 */ 474 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, 475 u32 value_lsb, u32 value_msb) 476 { 477 /* Write the lsb 32 bits*/ 478 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); 479 480 /* Write the msb 32 bits */ 481 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); 482 } 483 484 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) 485 { 486 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); 487 } 488 489 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, 490 dma_addr_t addr) 491 { 492 if (chan->ext_addr) 493 dma_writeq(chan, reg, addr); 494 else 495 dma_ctrl_write(chan, reg, addr); 496 } 497 498 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, 499 struct xilinx_axidma_desc_hw *hw, 500 dma_addr_t buf_addr, size_t sg_used, 501 size_t period_len) 502 { 503 if (chan->ext_addr) { 504 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); 505 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + 506 period_len); 507 } else { 508 hw->buf_addr = buf_addr + sg_used + period_len; 509 } 510 } 511 512 /* ----------------------------------------------------------------------------- 513 * Descriptors and segments alloc and free 514 */ 515 516 /** 517 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment 518 * @chan: Driver specific DMA channel 519 * 520 * Return: The allocated segment on success and NULL on failure. 521 */ 522 static struct xilinx_vdma_tx_segment * 523 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 524 { 525 struct xilinx_vdma_tx_segment *segment; 526 dma_addr_t phys; 527 528 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 529 if (!segment) 530 return NULL; 531 532 segment->phys = phys; 533 534 return segment; 535 } 536 537 /** 538 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment 539 * @chan: Driver specific DMA channel 540 * 541 * Return: The allocated segment on success and NULL on failure. 
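 *
 * Segments come from the channel's DMA pool and are allocated with
 * GFP_ATOMIC, since the dmaengine prep callbacks that use this helper
 * may be called from atomic context.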
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				struct xilinx_axidma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
				struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
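 *
 * The descriptor itself is only driver book-keeping (kzalloc'd with
 * GFP_KERNEL); the hardware-visible buffer descriptors live in the
 * segments that are later added to its segments list.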
617 */ 618 static struct xilinx_dma_tx_descriptor * 619 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) 620 { 621 struct xilinx_dma_tx_descriptor *desc; 622 623 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 624 if (!desc) 625 return NULL; 626 627 INIT_LIST_HEAD(&desc->segments); 628 629 return desc; 630 } 631 632 /** 633 * xilinx_dma_free_tx_descriptor - Free transaction descriptor 634 * @chan: Driver specific DMA channel 635 * @desc: DMA transaction descriptor 636 */ 637 static void 638 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, 639 struct xilinx_dma_tx_descriptor *desc) 640 { 641 struct xilinx_vdma_tx_segment *segment, *next; 642 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 643 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 644 645 if (!desc) 646 return; 647 648 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 649 list_for_each_entry_safe(segment, next, &desc->segments, node) { 650 list_del(&segment->node); 651 xilinx_vdma_free_tx_segment(chan, segment); 652 } 653 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 654 list_for_each_entry_safe(cdma_segment, cdma_next, 655 &desc->segments, node) { 656 list_del(&cdma_segment->node); 657 xilinx_cdma_free_tx_segment(chan, cdma_segment); 658 } 659 } else { 660 list_for_each_entry_safe(axidma_segment, axidma_next, 661 &desc->segments, node) { 662 list_del(&axidma_segment->node); 663 xilinx_dma_free_tx_segment(chan, axidma_segment); 664 } 665 } 666 667 kfree(desc); 668 } 669 670 /* Required functions */ 671 672 /** 673 * xilinx_dma_free_desc_list - Free descriptors list 674 * @chan: Driver specific DMA channel 675 * @list: List to parse and delete the descriptor 676 */ 677 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, 678 struct list_head *list) 679 { 680 struct xilinx_dma_tx_descriptor *desc, *next; 681 682 list_for_each_entry_safe(desc, next, list, node) { 683 list_del(&desc->node); 684 xilinx_dma_free_tx_descriptor(chan, desc); 685 } 686 } 687 688 /** 689 * xilinx_dma_free_descriptors - Free channel descriptors 690 * @chan: Driver specific DMA channel 691 */ 692 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) 693 { 694 unsigned long flags; 695 696 spin_lock_irqsave(&chan->lock, flags); 697 698 xilinx_dma_free_desc_list(chan, &chan->pending_list); 699 xilinx_dma_free_desc_list(chan, &chan->done_list); 700 xilinx_dma_free_desc_list(chan, &chan->active_list); 701 702 spin_unlock_irqrestore(&chan->lock, flags); 703 } 704 705 /** 706 * xilinx_dma_free_chan_resources - Free channel resources 707 * @dchan: DMA channel 708 */ 709 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 710 { 711 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 712 713 dev_dbg(chan->dev, "Free all channel resources.\n"); 714 715 xilinx_dma_free_descriptors(chan); 716 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 717 xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); 718 xilinx_dma_free_tx_segment(chan, chan->seg_v); 719 } 720 dma_pool_destroy(chan->desc_pool); 721 chan->desc_pool = NULL; 722 } 723 724 /** 725 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback 726 * @chan: Driver specific dma channel 727 * @desc: dma transaction descriptor 728 * @flags: flags for spin lock 729 */ 730 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, 731 struct xilinx_dma_tx_descriptor *desc, 732 unsigned long *flags) 733 { 734 dma_async_tx_callback callback; 735 void *callback_param; 736 737 callback = 
desc->async_tx.callback; 738 callback_param = desc->async_tx.callback_param; 739 if (callback) { 740 spin_unlock_irqrestore(&chan->lock, *flags); 741 callback(callback_param); 742 spin_lock_irqsave(&chan->lock, *flags); 743 } 744 } 745 746 /** 747 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 748 * @chan: Driver specific DMA channel 749 */ 750 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) 751 { 752 struct xilinx_dma_tx_descriptor *desc, *next; 753 unsigned long flags; 754 755 spin_lock_irqsave(&chan->lock, flags); 756 757 list_for_each_entry_safe(desc, next, &chan->done_list, node) { 758 struct dmaengine_desc_callback cb; 759 760 if (desc->cyclic) { 761 xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 762 break; 763 } 764 765 /* Remove from the list of running transactions */ 766 list_del(&desc->node); 767 768 /* Run the link descriptor callback function */ 769 dmaengine_desc_get_callback(&desc->async_tx, &cb); 770 if (dmaengine_desc_callback_valid(&cb)) { 771 spin_unlock_irqrestore(&chan->lock, flags); 772 dmaengine_desc_callback_invoke(&cb, NULL); 773 spin_lock_irqsave(&chan->lock, flags); 774 } 775 776 /* Run any dependencies, then free the descriptor */ 777 dma_run_dependencies(&desc->async_tx); 778 xilinx_dma_free_tx_descriptor(chan, desc); 779 } 780 781 spin_unlock_irqrestore(&chan->lock, flags); 782 } 783 784 /** 785 * xilinx_dma_do_tasklet - Schedule completion tasklet 786 * @data: Pointer to the Xilinx DMA channel structure 787 */ 788 static void xilinx_dma_do_tasklet(unsigned long data) 789 { 790 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; 791 792 xilinx_dma_chan_desc_cleanup(chan); 793 } 794 795 /** 796 * xilinx_dma_alloc_chan_resources - Allocate channel resources 797 * @dchan: DMA channel 798 * 799 * Return: '0' on success and failure value on error 800 */ 801 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 802 { 803 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 804 805 /* Has this channel already been allocated? */ 806 if (chan->desc_pool) 807 return 0; 808 809 /* 810 * We need the descriptor to be aligned to 64bytes 811 * for meeting Xilinx VDMA specification requirement. 812 */ 813 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 814 chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", 815 chan->dev, 816 sizeof(struct xilinx_axidma_tx_segment), 817 __alignof__(struct xilinx_axidma_tx_segment), 818 0); 819 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 820 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 821 chan->dev, 822 sizeof(struct xilinx_cdma_tx_segment), 823 __alignof__(struct xilinx_cdma_tx_segment), 824 0); 825 } else { 826 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 827 chan->dev, 828 sizeof(struct xilinx_vdma_tx_segment), 829 __alignof__(struct xilinx_vdma_tx_segment), 830 0); 831 } 832 833 if (!chan->desc_pool) { 834 dev_err(chan->dev, 835 "unable to allocate channel %d descriptor pool\n", 836 chan->id); 837 return -ENOMEM; 838 } 839 840 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 841 /* 842 * For AXI DMA case after submitting a pending_list, keep 843 * an extra segment allocated so that the "next descriptor" 844 * pointer on the tail descriptor always points to a 845 * valid descriptor, even when paused after reaching taildesc. 846 * This way, it is possible to issue additional 847 * transfers without halting and restarting the channel. 
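		 * (The spare segment is swapped with the head segment of each
		 * new submission in xilinx_dma_start_transfer().)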
848 */ 849 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); 850 851 /* 852 * For cyclic DMA mode we need to program the tail Descriptor 853 * register with a value which is not a part of the BD chain 854 * so allocating a desc segment during channel allocation for 855 * programming tail descriptor. 856 */ 857 chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); 858 } 859 860 dma_cookie_init(dchan); 861 862 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 863 /* For AXI DMA resetting once channel will reset the 864 * other channel as well so enable the interrupts here. 865 */ 866 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 867 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 868 } 869 870 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 871 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 872 XILINX_CDMA_CR_SGMODE); 873 874 return 0; 875 } 876 877 /** 878 * xilinx_dma_tx_status - Get DMA transaction status 879 * @dchan: DMA channel 880 * @cookie: Transaction identifier 881 * @txstate: Transaction state 882 * 883 * Return: DMA transaction status 884 */ 885 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, 886 dma_cookie_t cookie, 887 struct dma_tx_state *txstate) 888 { 889 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 890 struct xilinx_dma_tx_descriptor *desc; 891 struct xilinx_axidma_tx_segment *segment; 892 struct xilinx_axidma_desc_hw *hw; 893 enum dma_status ret; 894 unsigned long flags; 895 u32 residue = 0; 896 897 ret = dma_cookie_status(dchan, cookie, txstate); 898 if (ret == DMA_COMPLETE || !txstate) 899 return ret; 900 901 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 902 spin_lock_irqsave(&chan->lock, flags); 903 904 desc = list_last_entry(&chan->active_list, 905 struct xilinx_dma_tx_descriptor, node); 906 if (chan->has_sg) { 907 list_for_each_entry(segment, &desc->segments, node) { 908 hw = &segment->hw; 909 residue += (hw->control - hw->status) & 910 XILINX_DMA_MAX_TRANS_LEN; 911 } 912 } 913 spin_unlock_irqrestore(&chan->lock, flags); 914 915 chan->residue = residue; 916 dma_set_residue(txstate, chan->residue); 917 } 918 919 return ret; 920 } 921 922 /** 923 * xilinx_dma_is_running - Check if DMA channel is running 924 * @chan: Driver specific DMA channel 925 * 926 * Return: '1' if running, '0' if not. 927 */ 928 static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) 929 { 930 return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 931 XILINX_DMA_DMASR_HALTED) && 932 (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & 933 XILINX_DMA_DMACR_RUNSTOP); 934 } 935 936 /** 937 * xilinx_dma_is_idle - Check if DMA channel is idle 938 * @chan: Driver specific DMA channel 939 * 940 * Return: '1' if idle, '0' if not. 
941 */ 942 static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) 943 { 944 return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 945 XILINX_DMA_DMASR_IDLE; 946 } 947 948 /** 949 * xilinx_dma_halt - Halt DMA channel 950 * @chan: Driver specific DMA channel 951 */ 952 static void xilinx_dma_halt(struct xilinx_dma_chan *chan) 953 { 954 int err; 955 u32 val; 956 957 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 958 959 /* Wait for the hardware to halt */ 960 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 961 (val & XILINX_DMA_DMASR_HALTED), 0, 962 XILINX_DMA_LOOP_COUNT); 963 964 if (err) { 965 dev_err(chan->dev, "Cannot stop channel %p: %x\n", 966 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 967 chan->err = true; 968 } 969 } 970 971 /** 972 * xilinx_dma_start - Start DMA channel 973 * @chan: Driver specific DMA channel 974 */ 975 static void xilinx_dma_start(struct xilinx_dma_chan *chan) 976 { 977 int err; 978 u32 val; 979 980 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 981 982 /* Wait for the hardware to start */ 983 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 984 !(val & XILINX_DMA_DMASR_HALTED), 0, 985 XILINX_DMA_LOOP_COUNT); 986 987 if (err) { 988 dev_err(chan->dev, "Cannot start channel %p: %x\n", 989 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 990 991 chan->err = true; 992 } 993 } 994 995 /** 996 * xilinx_vdma_start_transfer - Starts VDMA transfer 997 * @chan: Driver specific channel struct pointer 998 */ 999 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 1000 { 1001 struct xilinx_vdma_config *config = &chan->config; 1002 struct xilinx_dma_tx_descriptor *desc, *tail_desc; 1003 u32 reg; 1004 struct xilinx_vdma_tx_segment *tail_segment; 1005 1006 /* This function was invoked with lock held */ 1007 if (chan->err) 1008 return; 1009 1010 if (list_empty(&chan->pending_list)) 1011 return; 1012 1013 desc = list_first_entry(&chan->pending_list, 1014 struct xilinx_dma_tx_descriptor, node); 1015 tail_desc = list_last_entry(&chan->pending_list, 1016 struct xilinx_dma_tx_descriptor, node); 1017 1018 tail_segment = list_last_entry(&tail_desc->segments, 1019 struct xilinx_vdma_tx_segment, node); 1020 1021 /* If it is SG mode and hardware is busy, cannot submit */ 1022 if (chan->has_sg && xilinx_dma_is_running(chan) && 1023 !xilinx_dma_is_idle(chan)) { 1024 dev_dbg(chan->dev, "DMA controller still busy\n"); 1025 return; 1026 } 1027 1028 /* 1029 * If hardware is idle, then all descriptors on the running lists are 1030 * done, start new transfers 1031 */ 1032 if (chan->has_sg) 1033 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1034 desc->async_tx.phys); 1035 1036 /* Configure the hardware using info in the config structure */ 1037 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1038 1039 if (config->frm_cnt_en) 1040 reg |= XILINX_DMA_DMACR_FRAMECNT_EN; 1041 else 1042 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 1043 1044 /* Configure channel to allow number frame buffers */ 1045 dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, 1046 chan->desc_pendingcount); 1047 1048 /* 1049 * With SG, start with circular mode, so that BDs can be fetched. 
1050 * In direct register mode, if not parking, enable circular mode 1051 */ 1052 if (chan->has_sg || !config->park) 1053 reg |= XILINX_DMA_DMACR_CIRC_EN; 1054 1055 if (config->park) 1056 reg &= ~XILINX_DMA_DMACR_CIRC_EN; 1057 1058 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1059 1060 if (config->park && (config->park_frm >= 0) && 1061 (config->park_frm < chan->num_frms)) { 1062 if (chan->direction == DMA_MEM_TO_DEV) 1063 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1064 config->park_frm << 1065 XILINX_DMA_PARK_PTR_RD_REF_SHIFT); 1066 else 1067 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1068 config->park_frm << 1069 XILINX_DMA_PARK_PTR_WR_REF_SHIFT); 1070 } 1071 1072 /* Start the hardware */ 1073 xilinx_dma_start(chan); 1074 1075 if (chan->err) 1076 return; 1077 1078 /* Start the transfer */ 1079 if (chan->has_sg) { 1080 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1081 tail_segment->phys); 1082 } else { 1083 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1084 int i = 0; 1085 1086 if (chan->desc_submitcount < chan->num_frms) 1087 i = chan->desc_submitcount; 1088 1089 list_for_each_entry(segment, &desc->segments, node) { 1090 if (chan->ext_addr) 1091 vdma_desc_write_64(chan, 1092 XILINX_VDMA_REG_START_ADDRESS_64(i++), 1093 segment->hw.buf_addr, 1094 segment->hw.buf_addr_msb); 1095 else 1096 vdma_desc_write(chan, 1097 XILINX_VDMA_REG_START_ADDRESS(i++), 1098 segment->hw.buf_addr); 1099 1100 last = segment; 1101 } 1102 1103 if (!last) 1104 return; 1105 1106 /* HW expects these parameters to be same for one transaction */ 1107 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); 1108 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 1109 last->hw.stride); 1110 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 1111 } 1112 1113 if (!chan->has_sg) { 1114 list_del(&desc->node); 1115 list_add_tail(&desc->node, &chan->active_list); 1116 chan->desc_submitcount++; 1117 chan->desc_pendingcount--; 1118 if (chan->desc_submitcount == chan->num_frms) 1119 chan->desc_submitcount = 0; 1120 } else { 1121 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1122 chan->desc_pendingcount = 0; 1123 } 1124 } 1125 1126 /** 1127 * xilinx_cdma_start_transfer - Starts cdma transfer 1128 * @chan: Driver specific channel struct pointer 1129 */ 1130 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) 1131 { 1132 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1133 struct xilinx_cdma_tx_segment *tail_segment; 1134 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); 1135 1136 if (chan->err) 1137 return; 1138 1139 if (list_empty(&chan->pending_list)) 1140 return; 1141 1142 head_desc = list_first_entry(&chan->pending_list, 1143 struct xilinx_dma_tx_descriptor, node); 1144 tail_desc = list_last_entry(&chan->pending_list, 1145 struct xilinx_dma_tx_descriptor, node); 1146 tail_segment = list_last_entry(&tail_desc->segments, 1147 struct xilinx_cdma_tx_segment, node); 1148 1149 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1150 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1151 ctrl_reg |= chan->desc_pendingcount << 1152 XILINX_DMA_CR_COALESCE_SHIFT; 1153 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); 1154 } 1155 1156 if (chan->has_sg) { 1157 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1158 head_desc->async_tx.phys); 1159 1160 /* Update tail ptr register which will start the transfer */ 1161 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1162 tail_segment->phys); 1163 } else { 1164 /* In simple mode */ 1165 struct xilinx_cdma_tx_segment *segment; 1166 
struct xilinx_cdma_desc_hw *hw; 1167 1168 segment = list_first_entry(&head_desc->segments, 1169 struct xilinx_cdma_tx_segment, 1170 node); 1171 1172 hw = &segment->hw; 1173 1174 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); 1175 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); 1176 1177 /* Start the transfer */ 1178 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1179 hw->control & XILINX_DMA_MAX_TRANS_LEN); 1180 } 1181 1182 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1183 chan->desc_pendingcount = 0; 1184 } 1185 1186 /** 1187 * xilinx_dma_start_transfer - Starts DMA transfer 1188 * @chan: Driver specific channel struct pointer 1189 */ 1190 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) 1191 { 1192 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1193 struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; 1194 u32 reg; 1195 1196 if (chan->err) 1197 return; 1198 1199 if (list_empty(&chan->pending_list)) 1200 return; 1201 1202 /* If it is SG mode and hardware is busy, cannot submit */ 1203 if (chan->has_sg && xilinx_dma_is_running(chan) && 1204 !xilinx_dma_is_idle(chan)) { 1205 dev_dbg(chan->dev, "DMA controller still busy\n"); 1206 return; 1207 } 1208 1209 head_desc = list_first_entry(&chan->pending_list, 1210 struct xilinx_dma_tx_descriptor, node); 1211 tail_desc = list_last_entry(&chan->pending_list, 1212 struct xilinx_dma_tx_descriptor, node); 1213 tail_segment = list_last_entry(&tail_desc->segments, 1214 struct xilinx_axidma_tx_segment, node); 1215 1216 if (chan->has_sg && !chan->xdev->mcdma) { 1217 old_head = list_first_entry(&head_desc->segments, 1218 struct xilinx_axidma_tx_segment, node); 1219 new_head = chan->seg_v; 1220 /* Copy Buffer Descriptor fields. */ 1221 new_head->hw = old_head->hw; 1222 1223 /* Swap and save new reserve */ 1224 list_replace_init(&old_head->node, &new_head->node); 1225 chan->seg_v = old_head; 1226 1227 tail_segment->hw.next_desc = chan->seg_v->phys; 1228 head_desc->async_tx.phys = new_head->phys; 1229 } 1230 1231 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1232 1233 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1234 reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1235 reg |= chan->desc_pendingcount << 1236 XILINX_DMA_CR_COALESCE_SHIFT; 1237 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1238 } 1239 1240 if (chan->has_sg && !chan->xdev->mcdma) 1241 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1242 head_desc->async_tx.phys); 1243 1244 if (chan->has_sg && chan->xdev->mcdma) { 1245 if (chan->direction == DMA_MEM_TO_DEV) { 1246 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1247 head_desc->async_tx.phys); 1248 } else { 1249 if (!chan->tdest) { 1250 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1251 head_desc->async_tx.phys); 1252 } else { 1253 dma_ctrl_write(chan, 1254 XILINX_DMA_MCRX_CDESC(chan->tdest), 1255 head_desc->async_tx.phys); 1256 } 1257 } 1258 } 1259 1260 xilinx_dma_start(chan); 1261 1262 if (chan->err) 1263 return; 1264 1265 /* Start the transfer */ 1266 if (chan->has_sg && !chan->xdev->mcdma) { 1267 if (chan->cyclic) 1268 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1269 chan->cyclic_seg_v->phys); 1270 else 1271 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1272 tail_segment->phys); 1273 } else if (chan->has_sg && chan->xdev->mcdma) { 1274 if (chan->direction == DMA_MEM_TO_DEV) { 1275 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1276 tail_segment->phys); 1277 } else { 1278 if (!chan->tdest) { 1279 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1280 tail_segment->phys); 
1281 } else { 1282 dma_ctrl_write(chan, 1283 XILINX_DMA_MCRX_TDESC(chan->tdest), 1284 tail_segment->phys); 1285 } 1286 } 1287 } else { 1288 struct xilinx_axidma_tx_segment *segment; 1289 struct xilinx_axidma_desc_hw *hw; 1290 1291 segment = list_first_entry(&head_desc->segments, 1292 struct xilinx_axidma_tx_segment, 1293 node); 1294 hw = &segment->hw; 1295 1296 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); 1297 1298 /* Start the transfer */ 1299 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1300 hw->control & XILINX_DMA_MAX_TRANS_LEN); 1301 } 1302 1303 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1304 chan->desc_pendingcount = 0; 1305 } 1306 1307 /** 1308 * xilinx_dma_issue_pending - Issue pending transactions 1309 * @dchan: DMA channel 1310 */ 1311 static void xilinx_dma_issue_pending(struct dma_chan *dchan) 1312 { 1313 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1314 unsigned long flags; 1315 1316 spin_lock_irqsave(&chan->lock, flags); 1317 chan->start_transfer(chan); 1318 spin_unlock_irqrestore(&chan->lock, flags); 1319 } 1320 1321 /** 1322 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete 1323 * @chan : xilinx DMA channel 1324 * 1325 * CONTEXT: hardirq 1326 */ 1327 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) 1328 { 1329 struct xilinx_dma_tx_descriptor *desc, *next; 1330 1331 /* This function was invoked with lock held */ 1332 if (list_empty(&chan->active_list)) 1333 return; 1334 1335 list_for_each_entry_safe(desc, next, &chan->active_list, node) { 1336 list_del(&desc->node); 1337 if (!desc->cyclic) 1338 dma_cookie_complete(&desc->async_tx); 1339 list_add_tail(&desc->node, &chan->done_list); 1340 } 1341 } 1342 1343 /** 1344 * xilinx_dma_reset - Reset DMA channel 1345 * @chan: Driver specific DMA channel 1346 * 1347 * Return: '0' on success and failure value on error 1348 */ 1349 static int xilinx_dma_reset(struct xilinx_dma_chan *chan) 1350 { 1351 int err; 1352 u32 tmp; 1353 1354 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); 1355 1356 /* Wait for the hardware to finish reset */ 1357 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, 1358 !(tmp & XILINX_DMA_DMACR_RESET), 0, 1359 XILINX_DMA_LOOP_COUNT); 1360 1361 if (err) { 1362 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 1363 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), 1364 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1365 return -ETIMEDOUT; 1366 } 1367 1368 chan->err = false; 1369 1370 return err; 1371 } 1372 1373 /** 1374 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts 1375 * @chan: Driver specific DMA channel 1376 * 1377 * Return: '0' on success and failure value on error 1378 */ 1379 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) 1380 { 1381 int err; 1382 1383 /* Reset VDMA */ 1384 err = xilinx_dma_reset(chan); 1385 if (err) 1386 return err; 1387 1388 /* Enable interrupts */ 1389 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1390 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1391 1392 return 0; 1393 } 1394 1395 /** 1396 * xilinx_dma_irq_handler - DMA Interrupt handler 1397 * @irq: IRQ number 1398 * @data: Pointer to the Xilinx DMA channel structure 1399 * 1400 * Return: IRQ_HANDLED/IRQ_NONE 1401 */ 1402 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) 1403 { 1404 struct xilinx_dma_chan *chan = data; 1405 u32 status; 1406 1407 /* Read the status and ack the interrupts. 
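	 * The interrupt bits in DMASR are write-one-to-clear, so writing the
	 * handled bits back acknowledges them.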
*/ 1408 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); 1409 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) 1410 return IRQ_NONE; 1411 1412 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1413 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1414 1415 if (status & XILINX_DMA_DMASR_ERR_IRQ) { 1416 /* 1417 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 1418 * error is recoverable, ignore it. Otherwise flag the error. 1419 * 1420 * Only recoverable errors can be cleared in the DMASR register, 1421 * make sure not to write to other error bits to 1. 1422 */ 1423 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; 1424 1425 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1426 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); 1427 1428 if (!chan->flush_on_fsync || 1429 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { 1430 dev_err(chan->dev, 1431 "Channel %p has errors %x, cdr %x tdr %x\n", 1432 chan, errors, 1433 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), 1434 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); 1435 chan->err = true; 1436 } 1437 } 1438 1439 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { 1440 /* 1441 * Device takes too long to do the transfer when user requires 1442 * responsiveness. 1443 */ 1444 dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1445 } 1446 1447 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { 1448 spin_lock(&chan->lock); 1449 xilinx_dma_complete_descriptor(chan); 1450 chan->start_transfer(chan); 1451 spin_unlock(&chan->lock); 1452 } 1453 1454 tasklet_schedule(&chan->tasklet); 1455 return IRQ_HANDLED; 1456 } 1457 1458 /** 1459 * append_desc_queue - Queuing descriptor 1460 * @chan: Driver specific dma channel 1461 * @desc: dma transaction descriptor 1462 */ 1463 static void append_desc_queue(struct xilinx_dma_chan *chan, 1464 struct xilinx_dma_tx_descriptor *desc) 1465 { 1466 struct xilinx_vdma_tx_segment *tail_segment; 1467 struct xilinx_dma_tx_descriptor *tail_desc; 1468 struct xilinx_axidma_tx_segment *axidma_tail_segment; 1469 struct xilinx_cdma_tx_segment *cdma_tail_segment; 1470 1471 if (list_empty(&chan->pending_list)) 1472 goto append; 1473 1474 /* 1475 * Add the hardware descriptor to the chain of hardware descriptors 1476 * that already exists in memory. 
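	 * The tail segment of the current tail descriptor gets its next_desc
	 * pointer set to the new descriptor's first segment, so the hardware
	 * can follow the chain across submissions.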
1477 */ 1478 tail_desc = list_last_entry(&chan->pending_list, 1479 struct xilinx_dma_tx_descriptor, node); 1480 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 1481 tail_segment = list_last_entry(&tail_desc->segments, 1482 struct xilinx_vdma_tx_segment, 1483 node); 1484 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1485 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1486 cdma_tail_segment = list_last_entry(&tail_desc->segments, 1487 struct xilinx_cdma_tx_segment, 1488 node); 1489 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1490 } else { 1491 axidma_tail_segment = list_last_entry(&tail_desc->segments, 1492 struct xilinx_axidma_tx_segment, 1493 node); 1494 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1495 } 1496 1497 /* 1498 * Add the software descriptor and all children to the list 1499 * of pending transactions 1500 */ 1501 append: 1502 list_add_tail(&desc->node, &chan->pending_list); 1503 chan->desc_pendingcount++; 1504 1505 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) 1506 && unlikely(chan->desc_pendingcount > chan->num_frms)) { 1507 dev_dbg(chan->dev, "desc pendingcount is too high\n"); 1508 chan->desc_pendingcount = chan->num_frms; 1509 } 1510 } 1511 1512 /** 1513 * xilinx_dma_tx_submit - Submit DMA transaction 1514 * @tx: Async transaction descriptor 1515 * 1516 * Return: cookie value on success and failure value on error 1517 */ 1518 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 1519 { 1520 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); 1521 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); 1522 dma_cookie_t cookie; 1523 unsigned long flags; 1524 int err; 1525 1526 if (chan->cyclic) { 1527 xilinx_dma_free_tx_descriptor(chan, desc); 1528 return -EBUSY; 1529 } 1530 1531 if (chan->err) { 1532 /* 1533 * If reset fails, need to hard reset the system. 1534 * Channel is no longer functional 1535 */ 1536 err = xilinx_dma_chan_reset(chan); 1537 if (err < 0) 1538 return err; 1539 } 1540 1541 spin_lock_irqsave(&chan->lock, flags); 1542 1543 cookie = dma_cookie_assign(tx); 1544 1545 /* Put this transaction onto the tail of the pending queue */ 1546 append_desc_queue(chan, desc); 1547 1548 if (desc->cyclic) 1549 chan->cyclic = true; 1550 1551 spin_unlock_irqrestore(&chan->lock, flags); 1552 1553 return cookie; 1554 } 1555 1556 /** 1557 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a 1558 * DMA_SLAVE transaction 1559 * @dchan: DMA channel 1560 * @xt: Interleaved template pointer 1561 * @flags: transfer ack flags 1562 * 1563 * Return: Async transaction descriptor on success and NULL on failure 1564 */ 1565 static struct dma_async_tx_descriptor * 1566 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, 1567 struct dma_interleaved_template *xt, 1568 unsigned long flags) 1569 { 1570 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1571 struct xilinx_dma_tx_descriptor *desc; 1572 struct xilinx_vdma_tx_segment *segment, *prev = NULL; 1573 struct xilinx_vdma_desc_hw *hw; 1574 1575 if (!is_slave_direction(xt->dir)) 1576 return NULL; 1577 1578 if (!xt->numf || !xt->sgl[0].size) 1579 return NULL; 1580 1581 if (xt->frame_size != 1) 1582 return NULL; 1583 1584 /* Allocate a transaction descriptor. 
*/ 1585 desc = xilinx_dma_alloc_tx_descriptor(chan); 1586 if (!desc) 1587 return NULL; 1588 1589 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1590 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1591 async_tx_ack(&desc->async_tx); 1592 1593 /* Allocate the link descriptor from DMA pool */ 1594 segment = xilinx_vdma_alloc_tx_segment(chan); 1595 if (!segment) 1596 goto error; 1597 1598 /* Fill in the hardware descriptor */ 1599 hw = &segment->hw; 1600 hw->vsize = xt->numf; 1601 hw->hsize = xt->sgl[0].size; 1602 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 1603 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; 1604 hw->stride |= chan->config.frm_dly << 1605 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 1606 1607 if (xt->dir != DMA_MEM_TO_DEV) { 1608 if (chan->ext_addr) { 1609 hw->buf_addr = lower_32_bits(xt->dst_start); 1610 hw->buf_addr_msb = upper_32_bits(xt->dst_start); 1611 } else { 1612 hw->buf_addr = xt->dst_start; 1613 } 1614 } else { 1615 if (chan->ext_addr) { 1616 hw->buf_addr = lower_32_bits(xt->src_start); 1617 hw->buf_addr_msb = upper_32_bits(xt->src_start); 1618 } else { 1619 hw->buf_addr = xt->src_start; 1620 } 1621 } 1622 1623 /* Insert the segment into the descriptor segments list. */ 1624 list_add_tail(&segment->node, &desc->segments); 1625 1626 prev = segment; 1627 1628 /* Link the last hardware descriptor with the first. */ 1629 segment = list_first_entry(&desc->segments, 1630 struct xilinx_vdma_tx_segment, node); 1631 desc->async_tx.phys = segment->phys; 1632 1633 return &desc->async_tx; 1634 1635 error: 1636 xilinx_dma_free_tx_descriptor(chan, desc); 1637 return NULL; 1638 } 1639 1640 /** 1641 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction 1642 * @dchan: DMA channel 1643 * @dma_dst: destination address 1644 * @dma_src: source address 1645 * @len: transfer length 1646 * @flags: transfer ack flags 1647 * 1648 * Return: Async transaction descriptor on success and NULL on failure 1649 */ 1650 static struct dma_async_tx_descriptor * 1651 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, 1652 dma_addr_t dma_src, size_t len, unsigned long flags) 1653 { 1654 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1655 struct xilinx_dma_tx_descriptor *desc; 1656 struct xilinx_cdma_tx_segment *segment, *prev; 1657 struct xilinx_cdma_desc_hw *hw; 1658 1659 if (!len || len > XILINX_DMA_MAX_TRANS_LEN) 1660 return NULL; 1661 1662 desc = xilinx_dma_alloc_tx_descriptor(chan); 1663 if (!desc) 1664 return NULL; 1665 1666 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1667 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1668 1669 /* Allocate the link descriptor from DMA pool */ 1670 segment = xilinx_cdma_alloc_tx_segment(chan); 1671 if (!segment) 1672 goto error; 1673 1674 hw = &segment->hw; 1675 hw->control = len; 1676 hw->src_addr = dma_src; 1677 hw->dest_addr = dma_dst; 1678 if (chan->ext_addr) { 1679 hw->src_addr_msb = upper_32_bits(dma_src); 1680 hw->dest_addr_msb = upper_32_bits(dma_dst); 1681 } 1682 1683 /* Fill the previous next descriptor with current */ 1684 prev = list_last_entry(&desc->segments, 1685 struct xilinx_cdma_tx_segment, node); 1686 prev->hw.next_desc = segment->phys; 1687 1688 /* Insert the segment into the descriptor segments list. */ 1689 list_add_tail(&segment->node, &desc->segments); 1690 1691 prev = segment; 1692 1693 /* Link the last hardware descriptor with the first. 
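	 * The tail segment's next_desc is pointed back at the first segment,
	 * so the chain always ends with a valid descriptor pointer.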
*/ 1694 segment = list_first_entry(&desc->segments, 1695 struct xilinx_cdma_tx_segment, node); 1696 desc->async_tx.phys = segment->phys; 1697 prev->hw.next_desc = segment->phys; 1698 1699 return &desc->async_tx; 1700 1701 error: 1702 xilinx_dma_free_tx_descriptor(chan, desc); 1703 return NULL; 1704 } 1705 1706 /** 1707 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 1708 * @dchan: DMA channel 1709 * @sgl: scatterlist to transfer to/from 1710 * @sg_len: number of entries in @scatterlist 1711 * @direction: DMA direction 1712 * @flags: transfer ack flags 1713 * @context: APP words of the descriptor 1714 * 1715 * Return: Async transaction descriptor on success and NULL on failure 1716 */ 1717 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( 1718 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 1719 enum dma_transfer_direction direction, unsigned long flags, 1720 void *context) 1721 { 1722 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1723 struct xilinx_dma_tx_descriptor *desc; 1724 struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; 1725 u32 *app_w = (u32 *)context; 1726 struct scatterlist *sg; 1727 size_t copy; 1728 size_t sg_used; 1729 unsigned int i; 1730 1731 if (!is_slave_direction(direction)) 1732 return NULL; 1733 1734 /* Allocate a transaction descriptor. */ 1735 desc = xilinx_dma_alloc_tx_descriptor(chan); 1736 if (!desc) 1737 return NULL; 1738 1739 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1740 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1741 1742 /* Build transactions using information in the scatter gather list */ 1743 for_each_sg(sgl, sg, sg_len, i) { 1744 sg_used = 0; 1745 1746 /* Loop until the entire scatterlist entry is used */ 1747 while (sg_used < sg_dma_len(sg)) { 1748 struct xilinx_axidma_desc_hw *hw; 1749 1750 /* Get a free segment */ 1751 segment = xilinx_axidma_alloc_tx_segment(chan); 1752 if (!segment) 1753 goto error; 1754 1755 /* 1756 * Calculate the maximum number of bytes to transfer, 1757 * making sure it is less than the hw limit 1758 */ 1759 copy = min_t(size_t, sg_dma_len(sg) - sg_used, 1760 XILINX_DMA_MAX_TRANS_LEN); 1761 hw = &segment->hw; 1762 1763 /* Fill in the descriptor */ 1764 xilinx_axidma_buf(chan, hw, sg_dma_address(sg), 1765 sg_used, 0); 1766 1767 hw->control = copy; 1768 1769 if (chan->direction == DMA_MEM_TO_DEV) { 1770 if (app_w) 1771 memcpy(hw->app, app_w, sizeof(u32) * 1772 XILINX_DMA_NUM_APP_WORDS); 1773 } 1774 1775 if (prev) 1776 prev->hw.next_desc = segment->phys; 1777 1778 prev = segment; 1779 sg_used += copy; 1780 1781 /* 1782 * Insert the segment into the descriptor segments 1783 * list. 
1784 */ 1785 list_add_tail(&segment->node, &desc->segments); 1786 } 1787 } 1788 1789 segment = list_first_entry(&desc->segments, 1790 struct xilinx_axidma_tx_segment, node); 1791 desc->async_tx.phys = segment->phys; 1792 prev->hw.next_desc = segment->phys; 1793 1794 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1795 if (chan->direction == DMA_MEM_TO_DEV) { 1796 segment->hw.control |= XILINX_DMA_BD_SOP; 1797 segment = list_last_entry(&desc->segments, 1798 struct xilinx_axidma_tx_segment, 1799 node); 1800 segment->hw.control |= XILINX_DMA_BD_EOP; 1801 } 1802 1803 return &desc->async_tx; 1804 1805 error: 1806 xilinx_dma_free_tx_descriptor(chan, desc); 1807 return NULL; 1808 } 1809 1810 /** 1811 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 1812 * @chan: DMA channel 1813 * @sgl: scatterlist to transfer to/from 1814 * @sg_len: number of entries in @scatterlist 1815 * @direction: DMA direction 1816 * @flags: transfer ack flags 1817 */ 1818 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 1819 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 1820 size_t period_len, enum dma_transfer_direction direction, 1821 unsigned long flags) 1822 { 1823 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1824 struct xilinx_dma_tx_descriptor *desc; 1825 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; 1826 size_t copy, sg_used; 1827 unsigned int num_periods; 1828 int i; 1829 u32 reg; 1830 1831 if (!period_len) 1832 return NULL; 1833 1834 num_periods = buf_len / period_len; 1835 1836 if (!num_periods) 1837 return NULL; 1838 1839 if (!is_slave_direction(direction)) 1840 return NULL; 1841 1842 /* Allocate a transaction descriptor. */ 1843 desc = xilinx_dma_alloc_tx_descriptor(chan); 1844 if (!desc) 1845 return NULL; 1846 1847 chan->direction = direction; 1848 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1849 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1850 1851 for (i = 0; i < num_periods; ++i) { 1852 sg_used = 0; 1853 1854 while (sg_used < period_len) { 1855 struct xilinx_axidma_desc_hw *hw; 1856 1857 /* Get a free segment */ 1858 segment = xilinx_axidma_alloc_tx_segment(chan); 1859 if (!segment) 1860 goto error; 1861 1862 /* 1863 * Calculate the maximum number of bytes to transfer, 1864 * making sure it is less than the hw limit 1865 */ 1866 copy = min_t(size_t, period_len - sg_used, 1867 XILINX_DMA_MAX_TRANS_LEN); 1868 hw = &segment->hw; 1869 xilinx_axidma_buf(chan, hw, buf_addr, sg_used, 1870 period_len * i); 1871 hw->control = copy; 1872 1873 if (prev) 1874 prev->hw.next_desc = segment->phys; 1875 1876 prev = segment; 1877 sg_used += copy; 1878 1879 /* 1880 * Insert the segment into the descriptor segments 1881 * list. 
1882 */ 1883 list_add_tail(&segment->node, &desc->segments); 1884 } 1885 } 1886 1887 head_segment = list_first_entry(&desc->segments, 1888 struct xilinx_axidma_tx_segment, node); 1889 desc->async_tx.phys = head_segment->phys; 1890 1891 desc->cyclic = true; 1892 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1893 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 1894 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1895 1896 segment = list_last_entry(&desc->segments, 1897 struct xilinx_axidma_tx_segment, 1898 node); 1899 segment->hw.next_desc = (u32) head_segment->phys; 1900 1901 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1902 if (direction == DMA_MEM_TO_DEV) { 1903 head_segment->hw.control |= XILINX_DMA_BD_SOP; 1904 segment->hw.control |= XILINX_DMA_BD_EOP; 1905 } 1906 1907 return &desc->async_tx; 1908 1909 error: 1910 xilinx_dma_free_tx_descriptor(chan, desc); 1911 return NULL; 1912 } 1913 1914 /** 1915 * xilinx_dma_prep_interleaved - prepare a descriptor for a 1916 * DMA_SLAVE transaction 1917 * @dchan: DMA channel 1918 * @xt: Interleaved template pointer 1919 * @flags: transfer ack flags 1920 * 1921 * Return: Async transaction descriptor on success and NULL on failure 1922 */ 1923 static struct dma_async_tx_descriptor * 1924 xilinx_dma_prep_interleaved(struct dma_chan *dchan, 1925 struct dma_interleaved_template *xt, 1926 unsigned long flags) 1927 { 1928 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1929 struct xilinx_dma_tx_descriptor *desc; 1930 struct xilinx_axidma_tx_segment *segment; 1931 struct xilinx_axidma_desc_hw *hw; 1932 1933 if (!is_slave_direction(xt->dir)) 1934 return NULL; 1935 1936 if (!xt->numf || !xt->sgl[0].size) 1937 return NULL; 1938 1939 if (xt->frame_size != 1) 1940 return NULL; 1941 1942 /* Allocate a transaction descriptor. */ 1943 desc = xilinx_dma_alloc_tx_descriptor(chan); 1944 if (!desc) 1945 return NULL; 1946 1947 chan->direction = xt->dir; 1948 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1949 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1950 1951 /* Get a free segment */ 1952 segment = xilinx_axidma_alloc_tx_segment(chan); 1953 if (!segment) 1954 goto error; 1955 1956 hw = &segment->hw; 1957 1958 /* Fill in the descriptor */ 1959 if (xt->dir != DMA_MEM_TO_DEV) 1960 hw->buf_addr = xt->dst_start; 1961 else 1962 hw->buf_addr = xt->src_start; 1963 1964 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; 1965 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & 1966 XILINX_DMA_BD_VSIZE_MASK; 1967 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & 1968 XILINX_DMA_BD_STRIDE_MASK; 1969 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; 1970 1971 /* 1972 * Insert the segment into the descriptor segments 1973 * list. 
1974	 */
1975	list_add_tail(&segment->node, &desc->segments);
1976
1977
1978	segment = list_first_entry(&desc->segments,
1979				   struct xilinx_axidma_tx_segment, node);
1980	desc->async_tx.phys = segment->phys;
1981
1982	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
1983	if (xt->dir == DMA_MEM_TO_DEV) {
1984		segment->hw.control |= XILINX_DMA_BD_SOP;
1985		segment = list_last_entry(&desc->segments,
1986					  struct xilinx_axidma_tx_segment,
1987					  node);
1988		segment->hw.control |= XILINX_DMA_BD_EOP;
1989	}
1990
1991	return &desc->async_tx;
1992
1993 error:
1994	xilinx_dma_free_tx_descriptor(chan, desc);
1995	return NULL;
1996 }
1997
1998 /**
1999  * xilinx_dma_terminate_all - Halt the channel and free descriptors
2000  * @dchan: Driver specific DMA Channel pointer
2001  */
2002 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2003 {
2004	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2005	u32 reg;
2006
2007	if (chan->cyclic)
2008		xilinx_dma_chan_reset(chan);
2009
2010	/* Halt the DMA engine */
2011	xilinx_dma_halt(chan);
2012
2013	/* Remove and free all of the descriptors in the lists */
2014	xilinx_dma_free_descriptors(chan);
2015
2016	if (chan->cyclic) {
2017		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2018		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2019		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2020		chan->cyclic = false;
2021	}
2022
2023	return 0;
2024 }
2025
2026 /**
2027  * xilinx_vdma_channel_set_config - Configure VDMA channel
2028  * Run-time configuration for Axi VDMA, supports:
2029  * . halt the channel
2030  * . configure interrupt coalescing and inter-packet delay threshold
2031  * . start/stop parking
2032  * . enable genlock
2033  *
2034  * @dchan: DMA channel
2035  * @cfg: VDMA device configuration pointer
2036  *
2037  * Return: '0' on success and failure value on error
2038  */
2039 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2040				    struct xilinx_vdma_config *cfg)
2041 {
2042	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2043	u32 dmacr;
2044
2045	if (cfg->reset)
2046		return xilinx_dma_chan_reset(chan);
2047
2048	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2049
2050	chan->config.frm_dly = cfg->frm_dly;
2051	chan->config.park = cfg->park;
2052
2053	/* genlock settings */
2054	chan->config.gen_lock = cfg->gen_lock;
2055	chan->config.master = cfg->master;
2056
2057	if (cfg->gen_lock && chan->genlock) {
2058		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2059		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2060	}
2061
2062	chan->config.frm_cnt_en = cfg->frm_cnt_en;
2063	if (cfg->park)
2064		chan->config.park_frm = cfg->park_frm;
2065	else
2066		chan->config.park_frm = -1;
2067
2068	chan->config.coalesc = cfg->coalesc;
2069	chan->config.delay = cfg->delay;
2070
2071	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2072		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2073		chan->config.coalesc = cfg->coalesc;
2074	}
2075
2076	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2077		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2078		chan->config.delay = cfg->delay;
2079	}
2080
2081	/* FSync Source selection */
2082	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2083	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2084
2085	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2086
2087	return 0;
2088 }
2089 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
2090
2091 /* -----------------------------------------------------------------------------
2092  * Probe and remove
2093  */
2094
2095 /**
2096  * xilinx_dma_chan_remove - Per
Channel remove function 2097 * @chan: Driver specific DMA channel 2098 */ 2099 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) 2100 { 2101 /* Disable all interrupts */ 2102 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 2103 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 2104 2105 if (chan->irq > 0) 2106 free_irq(chan->irq, chan); 2107 2108 tasklet_kill(&chan->tasklet); 2109 2110 list_del(&chan->common.device_node); 2111 } 2112 2113 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2114 struct clk **tx_clk, struct clk **rx_clk, 2115 struct clk **sg_clk, struct clk **tmp_clk) 2116 { 2117 int err; 2118 2119 *tmp_clk = NULL; 2120 2121 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2122 if (IS_ERR(*axi_clk)) { 2123 err = PTR_ERR(*axi_clk); 2124 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); 2125 return err; 2126 } 2127 2128 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2129 if (IS_ERR(*tx_clk)) 2130 *tx_clk = NULL; 2131 2132 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2133 if (IS_ERR(*rx_clk)) 2134 *rx_clk = NULL; 2135 2136 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); 2137 if (IS_ERR(*sg_clk)) 2138 *sg_clk = NULL; 2139 2140 err = clk_prepare_enable(*axi_clk); 2141 if (err) { 2142 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); 2143 return err; 2144 } 2145 2146 err = clk_prepare_enable(*tx_clk); 2147 if (err) { 2148 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 2149 goto err_disable_axiclk; 2150 } 2151 2152 err = clk_prepare_enable(*rx_clk); 2153 if (err) { 2154 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 2155 goto err_disable_txclk; 2156 } 2157 2158 err = clk_prepare_enable(*sg_clk); 2159 if (err) { 2160 dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); 2161 goto err_disable_rxclk; 2162 } 2163 2164 return 0; 2165 2166 err_disable_rxclk: 2167 clk_disable_unprepare(*rx_clk); 2168 err_disable_txclk: 2169 clk_disable_unprepare(*tx_clk); 2170 err_disable_axiclk: 2171 clk_disable_unprepare(*axi_clk); 2172 2173 return err; 2174 } 2175 2176 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2177 struct clk **dev_clk, struct clk **tmp_clk, 2178 struct clk **tmp1_clk, struct clk **tmp2_clk) 2179 { 2180 int err; 2181 2182 *tmp_clk = NULL; 2183 *tmp1_clk = NULL; 2184 *tmp2_clk = NULL; 2185 2186 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2187 if (IS_ERR(*axi_clk)) { 2188 err = PTR_ERR(*axi_clk); 2189 dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); 2190 return err; 2191 } 2192 2193 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); 2194 if (IS_ERR(*dev_clk)) { 2195 err = PTR_ERR(*dev_clk); 2196 dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); 2197 return err; 2198 } 2199 2200 err = clk_prepare_enable(*axi_clk); 2201 if (err) { 2202 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); 2203 return err; 2204 } 2205 2206 err = clk_prepare_enable(*dev_clk); 2207 if (err) { 2208 dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); 2209 goto err_disable_axiclk; 2210 } 2211 2212 return 0; 2213 2214 err_disable_axiclk: 2215 clk_disable_unprepare(*axi_clk); 2216 2217 return err; 2218 } 2219 2220 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2221 struct clk **tx_clk, struct clk **txs_clk, 2222 struct clk **rx_clk, struct clk **rxs_clk) 2223 { 2224 int err; 2225 2226 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2227 if (IS_ERR(*axi_clk)) { 2228 err = PTR_ERR(*axi_clk); 2229 
dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
2230		return err;
2231	}
2232
2233	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2234	if (IS_ERR(*tx_clk))
2235		*tx_clk = NULL;
2236
2237	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2238	if (IS_ERR(*txs_clk))
2239		*txs_clk = NULL;
2240
2241	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2242	if (IS_ERR(*rx_clk))
2243		*rx_clk = NULL;
2244
2245	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2246	if (IS_ERR(*rxs_clk))
2247		*rxs_clk = NULL;
2248
2249	err = clk_prepare_enable(*axi_clk);
2250	if (err) {
2251		dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
2252		return err;
2253	}
2254
2255	err = clk_prepare_enable(*tx_clk);
2256	if (err) {
2257		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
2258		goto err_disable_axiclk;
2259	}
2260
2261	err = clk_prepare_enable(*txs_clk);
2262	if (err) {
2263		dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
2264		goto err_disable_txclk;
2265	}
2266
2267	err = clk_prepare_enable(*rx_clk);
2268	if (err) {
2269		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
2270		goto err_disable_txsclk;
2271	}
2272
2273	err = clk_prepare_enable(*rxs_clk);
2274	if (err) {
2275		dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
2276		goto err_disable_rxclk;
2277	}
2278
2279	return 0;
2280
2281 err_disable_rxclk:
2282	clk_disable_unprepare(*rx_clk);
2283 err_disable_txsclk:
2284	clk_disable_unprepare(*txs_clk);
2285 err_disable_txclk:
2286	clk_disable_unprepare(*tx_clk);
2287 err_disable_axiclk:
2288	clk_disable_unprepare(*axi_clk);
2289
2290	return err;
2291 }
2292
2293 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2294 {
2295	clk_disable_unprepare(xdev->rxs_clk);
2296	clk_disable_unprepare(xdev->rx_clk);
2297	clk_disable_unprepare(xdev->txs_clk);
2298	clk_disable_unprepare(xdev->tx_clk);
2299	clk_disable_unprepare(xdev->axi_clk);
2300 }
2301
2302 /**
2303  * xilinx_dma_chan_probe - Per Channel Probing
2304  * It gets the channel features from the device tree entry and
2305  * initializes special channel handling routines
2306  *
2307  * @xdev: Driver specific device structure
2308  * @node: Device node
2309  * @chan_id: DMA Channel id
2310  * Return: '0' on success and failure value on error
2311  */
2312 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2313				  struct device_node *node, int chan_id)
2314 {
2315	struct xilinx_dma_chan *chan;
2316	bool has_dre = false;
2317	u32 value, width;
2318	int err;
2319
2320	/* Allocate and initialize the channel structure */
2321	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2322	if (!chan)
2323		return -ENOMEM;
2324
2325	chan->dev = xdev->dev;
2326	chan->xdev = xdev;
2327	chan->has_sg = xdev->has_sg;
2328	chan->desc_pendingcount = 0x0;
2329	chan->ext_addr = xdev->ext_addr;
2330
2331	spin_lock_init(&chan->lock);
2332	INIT_LIST_HEAD(&chan->pending_list);
2333	INIT_LIST_HEAD(&chan->done_list);
2334	INIT_LIST_HEAD(&chan->active_list);
2335
2336	/* Retrieve the channel properties from the device tree */
2337	has_dre = of_property_read_bool(node, "xlnx,include-dre");
2338
2339	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2340
2341	err = of_property_read_u32(node, "xlnx,datawidth", &value);
2342	if (err) {
2343		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2344		return err;
2345	}
2346	width = value >> 3; /* Convert bits to bytes */
2347
2348	/* If data width is greater than 8 bytes, DRE is not in hw */
2349	if (width > 8)
2350		has_dre = false;
2351
2352	if
(!has_dre) 2353 xdev->common.copy_align = fls(width - 1); 2354 2355 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || 2356 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || 2357 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { 2358 chan->direction = DMA_MEM_TO_DEV; 2359 chan->id = chan_id; 2360 chan->tdest = chan_id; 2361 2362 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2363 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2364 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 2365 2366 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2367 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) 2368 chan->flush_on_fsync = true; 2369 } 2370 } else if (of_device_is_compatible(node, 2371 "xlnx,axi-vdma-s2mm-channel") || 2372 of_device_is_compatible(node, 2373 "xlnx,axi-dma-s2mm-channel")) { 2374 chan->direction = DMA_DEV_TO_MEM; 2375 chan->id = chan_id; 2376 chan->tdest = chan_id - xdev->nr_channels; 2377 2378 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2379 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2380 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 2381 2382 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2383 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) 2384 chan->flush_on_fsync = true; 2385 } 2386 } else { 2387 dev_err(xdev->dev, "Invalid channel compatible node\n"); 2388 return -EINVAL; 2389 } 2390 2391 /* Request the interrupt */ 2392 chan->irq = irq_of_parse_and_map(node, 0); 2393 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, 2394 "xilinx-dma-controller", chan); 2395 if (err) { 2396 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); 2397 return err; 2398 } 2399 2400 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 2401 chan->start_transfer = xilinx_dma_start_transfer; 2402 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) 2403 chan->start_transfer = xilinx_cdma_start_transfer; 2404 else 2405 chan->start_transfer = xilinx_vdma_start_transfer; 2406 2407 /* Initialize the tasklet */ 2408 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, 2409 (unsigned long)chan); 2410 2411 /* 2412 * Initialize the DMA channel and add it to the DMA engine channels 2413 * list. 2414 */ 2415 chan->common.device = &xdev->common; 2416 2417 list_add_tail(&chan->common.device_node, &xdev->common.channels); 2418 xdev->chan[chan->id] = chan; 2419 2420 /* Reset the channel */ 2421 err = xilinx_dma_chan_reset(chan); 2422 if (err < 0) { 2423 dev_err(xdev->dev, "Reset channel failed\n"); 2424 return err; 2425 } 2426 2427 return 0; 2428 } 2429 2430 /** 2431 * xilinx_dma_child_probe - Per child node probe 2432 * It get number of dma-channels per child node from 2433 * device-tree and initializes all the channels. 2434 * 2435 * @xdev: Driver specific device structure 2436 * @node: Device node 2437 * 2438 * Return: 0 always. 
2439 */ 2440 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 2441 struct device_node *node) { 2442 int ret, i, nr_channels = 1; 2443 2444 ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2445 if ((ret < 0) && xdev->mcdma) 2446 dev_warn(xdev->dev, "missing dma-channels property\n"); 2447 2448 for (i = 0; i < nr_channels; i++) 2449 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); 2450 2451 xdev->nr_channels += nr_channels; 2452 2453 return 0; 2454 } 2455 2456 /** 2457 * of_dma_xilinx_xlate - Translation function 2458 * @dma_spec: Pointer to DMA specifier as found in the device tree 2459 * @ofdma: Pointer to DMA controller data 2460 * 2461 * Return: DMA channel pointer on success and NULL on error 2462 */ 2463 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 2464 struct of_dma *ofdma) 2465 { 2466 struct xilinx_dma_device *xdev = ofdma->of_dma_data; 2467 int chan_id = dma_spec->args[0]; 2468 2469 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) 2470 return NULL; 2471 2472 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 2473 } 2474 2475 static const struct xilinx_dma_config axidma_config = { 2476 .dmatype = XDMA_TYPE_AXIDMA, 2477 .clk_init = axidma_clk_init, 2478 }; 2479 2480 static const struct xilinx_dma_config axicdma_config = { 2481 .dmatype = XDMA_TYPE_CDMA, 2482 .clk_init = axicdma_clk_init, 2483 }; 2484 2485 static const struct xilinx_dma_config axivdma_config = { 2486 .dmatype = XDMA_TYPE_VDMA, 2487 .clk_init = axivdma_clk_init, 2488 }; 2489 2490 static const struct of_device_id xilinx_dma_of_ids[] = { 2491 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 2492 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 2493 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 2494 {} 2495 }; 2496 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); 2497 2498 /** 2499 * xilinx_dma_probe - Driver probe function 2500 * @pdev: Pointer to the platform_device structure 2501 * 2502 * Return: '0' on success and failure value on error 2503 */ 2504 static int xilinx_dma_probe(struct platform_device *pdev) 2505 { 2506 int (*clk_init)(struct platform_device *, struct clk **, struct clk **, 2507 struct clk **, struct clk **, struct clk **) 2508 = axivdma_clk_init; 2509 struct device_node *node = pdev->dev.of_node; 2510 struct xilinx_dma_device *xdev; 2511 struct device_node *child, *np = pdev->dev.of_node; 2512 struct resource *io; 2513 u32 num_frames, addr_width; 2514 int i, err; 2515 2516 /* Allocate and initialize the DMA engine structure */ 2517 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 2518 if (!xdev) 2519 return -ENOMEM; 2520 2521 xdev->dev = &pdev->dev; 2522 if (np) { 2523 const struct of_device_id *match; 2524 2525 match = of_match_node(xilinx_dma_of_ids, np); 2526 if (match && match->data) { 2527 xdev->dma_config = match->data; 2528 clk_init = xdev->dma_config->clk_init; 2529 } 2530 } 2531 2532 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, 2533 &xdev->rx_clk, &xdev->rxs_clk); 2534 if (err) 2535 return err; 2536 2537 /* Request and map I/O memory */ 2538 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2539 xdev->regs = devm_ioremap_resource(&pdev->dev, io); 2540 if (IS_ERR(xdev->regs)) 2541 return PTR_ERR(xdev->regs); 2542 2543 /* Retrieve the DMA engine properties from the device tree */ 2544 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); 2545 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 2546 xdev->mcdma = 
of_property_read_bool(node, "xlnx,mcdma"); 2547 2548 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2549 err = of_property_read_u32(node, "xlnx,num-fstores", 2550 &num_frames); 2551 if (err < 0) { 2552 dev_err(xdev->dev, 2553 "missing xlnx,num-fstores property\n"); 2554 return err; 2555 } 2556 2557 err = of_property_read_u32(node, "xlnx,flush-fsync", 2558 &xdev->flush_on_fsync); 2559 if (err < 0) 2560 dev_warn(xdev->dev, 2561 "missing xlnx,flush-fsync property\n"); 2562 } 2563 2564 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); 2565 if (err < 0) 2566 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); 2567 2568 if (addr_width > 32) 2569 xdev->ext_addr = true; 2570 else 2571 xdev->ext_addr = false; 2572 2573 /* Set the dma mask bits */ 2574 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); 2575 2576 /* Initialize the DMA engine */ 2577 xdev->common.dev = &pdev->dev; 2578 2579 INIT_LIST_HEAD(&xdev->common.channels); 2580 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { 2581 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); 2582 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); 2583 } 2584 2585 xdev->common.device_alloc_chan_resources = 2586 xilinx_dma_alloc_chan_resources; 2587 xdev->common.device_free_chan_resources = 2588 xilinx_dma_free_chan_resources; 2589 xdev->common.device_terminate_all = xilinx_dma_terminate_all; 2590 xdev->common.device_tx_status = xilinx_dma_tx_status; 2591 xdev->common.device_issue_pending = xilinx_dma_issue_pending; 2592 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2593 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 2594 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 2595 xdev->common.device_prep_dma_cyclic = 2596 xilinx_dma_prep_dma_cyclic; 2597 xdev->common.device_prep_interleaved_dma = 2598 xilinx_dma_prep_interleaved; 2599 /* Residue calculation is supported by only AXI DMA */ 2600 xdev->common.residue_granularity = 2601 DMA_RESIDUE_GRANULARITY_SEGMENT; 2602 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 2603 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); 2604 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 2605 } else { 2606 xdev->common.device_prep_interleaved_dma = 2607 xilinx_vdma_dma_prep_interleaved; 2608 } 2609 2610 platform_set_drvdata(pdev, xdev); 2611 2612 /* Initialize the channels */ 2613 for_each_child_of_node(node, child) { 2614 err = xilinx_dma_child_probe(xdev, child); 2615 if (err < 0) 2616 goto disable_clks; 2617 } 2618 2619 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2620 for (i = 0; i < xdev->nr_channels; i++) 2621 if (xdev->chan[i]) 2622 xdev->chan[i]->num_frms = num_frames; 2623 } 2624 2625 /* Register the DMA engine with the core */ 2626 dma_async_device_register(&xdev->common); 2627 2628 err = of_dma_controller_register(node, of_dma_xilinx_xlate, 2629 xdev); 2630 if (err < 0) { 2631 dev_err(&pdev->dev, "Unable to register DMA to DT\n"); 2632 dma_async_device_unregister(&xdev->common); 2633 goto error; 2634 } 2635 2636 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 2637 2638 return 0; 2639 2640 disable_clks: 2641 xdma_disable_allclks(xdev); 2642 error: 2643 for (i = 0; i < xdev->nr_channels; i++) 2644 if (xdev->chan[i]) 2645 xilinx_dma_chan_remove(xdev->chan[i]); 2646 2647 return err; 2648 } 2649 2650 /** 2651 * xilinx_dma_remove - Driver remove function 2652 * @pdev: Pointer to the platform_device structure 2653 * 2654 * Return: Always '0' 2655 */ 2656 static int xilinx_dma_remove(struct platform_device *pdev) 2657 { 2658 struct 
xilinx_dma_device *xdev = platform_get_drvdata(pdev); 2659 int i; 2660 2661 of_dma_controller_free(pdev->dev.of_node); 2662 2663 dma_async_device_unregister(&xdev->common); 2664 2665 for (i = 0; i < xdev->nr_channels; i++) 2666 if (xdev->chan[i]) 2667 xilinx_dma_chan_remove(xdev->chan[i]); 2668 2669 xdma_disable_allclks(xdev); 2670 2671 return 0; 2672 } 2673 2674 static struct platform_driver xilinx_vdma_driver = { 2675 .driver = { 2676 .name = "xilinx-vdma", 2677 .of_match_table = xilinx_dma_of_ids, 2678 }, 2679 .probe = xilinx_dma_probe, 2680 .remove = xilinx_dma_remove, 2681 }; 2682 2683 module_platform_driver(xilinx_vdma_driver); 2684 2685 MODULE_AUTHOR("Xilinx, Inc."); 2686 MODULE_DESCRIPTION("Xilinx VDMA driver"); 2687 MODULE_LICENSE("GPL v2"); 2688
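
The preparation hooks registered above are reached through the generic dmaengine client API rather than called directly. The following is a minimal client-side sketch, not part of this driver, showing how a consumer might exercise the cyclic path of an AXI DMA MM2S channel; the "tx" dma-names entry, the period count and size, and all example_* names are assumptions made for illustration only.

/*
 * Illustrative client sketch: start a repeating MM2S transfer through
 * dmaengine_prep_dma_cyclic(), which lands in xilinx_dma_prep_dma_cyclic()
 * for the AXI DMA variant. Channel name and sizes are assumptions.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

#define EXAMPLE_PERIODS		4
#define EXAMPLE_PERIOD_LEN	4096

static void example_period_done(void *param)
{
	/* Invoked from the driver's completion tasklet once per period */
}

int example_start_cyclic_tx(struct device *dev)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_addr_t dma_handle;
	dma_cookie_t cookie;
	size_t buf_len = EXAMPLE_PERIODS * EXAMPLE_PERIOD_LEN;
	void *buf;
	int ret;

	/* "tx" must match an entry in the client node's dma-names property */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	buf = dma_alloc_coherent(dev, buf_len, &dma_handle, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto release_chan;
	}

	txd = dmaengine_prep_dma_cyclic(chan, dma_handle, buf_len,
					EXAMPLE_PERIOD_LEN, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT);
	if (!txd) {
		ret = -EINVAL;
		goto free_buf;
	}

	txd->callback = example_period_done;
	txd->callback_param = NULL;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto free_buf;
	}

	/* Kick the channel; the BD ring now loops until terminate_all */
	dma_async_issue_pending(chan);
	return 0;

free_buf:
	dma_free_coherent(dev, buf_len, buf, dma_handle);
release_chan:
	dma_release_channel(chan);
	return ret;
}

Each period boundary corresponds to one hardware segment chain built by xilinx_dma_prep_dma_cyclic(), and the per-period callback is what DMA_RESIDUE_GRANULARITY_SEGMENT and the cyclic BD enable bit set in that function make possible; stopping the stream goes through xilinx_dma_terminate_all(), which clears the cyclic BD enable again.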
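
Similarly, the exported xilinx_vdma_channel_set_config() is intended to be paired with the interleaved dmaengine API when the core is synthesized as a VDMA. Below is a hedged sketch under stated assumptions: the "vdma-tx" channel name, the 1920x1080 frame with a padded stride, and the example_* function are hypothetical; only the xilinx_vdma_config fields and the template members shown are taken from the driver and core dmaengine definitions.

/*
 * Illustrative client sketch: configure a VDMA MM2S channel and queue one
 * frame through dmaengine_prep_interleaved_dma(). Geometry and names are
 * assumptions for the example.
 */
#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/err.h>
#include <linux/slab.h>

#define EXAMPLE_HSIZE	(1920 * 4)	/* active bytes per line */
#define EXAMPLE_VSIZE	1080		/* lines per frame */
#define EXAMPLE_STRIDE	(2048 * 4)	/* line-to-line pitch in memory */

int example_queue_vdma_frame(struct device *dev, dma_addr_t frame_buf)
{
	struct xilinx_vdma_config vdma_cfg = {
		.frm_cnt_en = 1,
		.coalesc = 1,	/* interrupt after every frame */
		.park = 0,	/* circular mode, no parking */
	};
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "vdma-tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = xilinx_vdma_channel_set_config(chan, &vdma_cfg);
	if (ret)
		goto release_chan;

	/* Only the first data_chunk is used, so keep frame_size at 1 */
	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt) {
		ret = -ENOMEM;
		goto release_chan;
	}

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = frame_buf;
	xt->numf = EXAMPLE_VSIZE;			/* vsize */
	xt->frame_size = 1;
	xt->sgl[0].size = EXAMPLE_HSIZE;		/* hsize */
	xt->sgl[0].icg = EXAMPLE_STRIDE - EXAMPLE_HSIZE; /* stride gap */

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);	/* the driver reads the template during prep */
	if (!txd) {
		ret = -EINVAL;
		goto release_chan;
	}

	if (dma_submit_error(dmaengine_submit(txd))) {
		ret = -EIO;
		goto release_chan;
	}

	dma_async_issue_pending(chan);
	return 0;

release_chan:
	dma_release_channel(chan);
	return ret;
}

The template maps directly onto the hardware descriptor fields programmed by the VDMA preparation routine: numf becomes the vertical size, sgl[0].size the horizontal size, and sgl[0].size + sgl[0].icg the stride, while the coalescing, parking and genlock settings flow into DMACR through xilinx_vdma_channel_set_config() exactly as shown in that function above.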