1 /* 2 * DMA driver for Xilinx Video DMA Engine 3 * 4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. 5 * 6 * Based on the Freescale DMA driver. 7 * 8 * Description: 9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP 10 * core that provides high-bandwidth direct memory access between memory 11 * and AXI4-Stream type video target peripherals. The core provides efficient 12 * two dimensional DMA operations with independent asynchronous read (S2MM) 13 * and write (MM2S) channel operation. It can be configured to have either 14 * one channel or two channels. If configured as two channels, one is to 15 * transmit to the video device (MM2S) and another is to receive from the 16 * video device (S2MM). Initialization, status, interrupt and management 17 * registers are accessed through an AXI4-Lite slave interface. 18 * 19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that 20 * provides high-bandwidth one dimensional direct memory access between memory 21 * and AXI4-Stream target peripherals. It supports one receive and one 22 * transmit channel, both of them optional at synthesis time. 23 * 24 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory 25 * Access (DMA) between a memory-mapped source address and a memory-mapped 26 * destination address. 27 * 28 * This program is free software: you can redistribute it and/or modify 29 * it under the terms of the GNU General Public License as published by 30 * the Free Software Foundation, either version 2 of the License, or 31 * (at your option) any later version. 32 */ 33 34 #include <linux/bitops.h> 35 #include <linux/dmapool.h> 36 #include <linux/dma/xilinx_dma.h> 37 #include <linux/init.h> 38 #include <linux/interrupt.h> 39 #include <linux/io.h> 40 #include <linux/iopoll.h> 41 #include <linux/module.h> 42 #include <linux/of_address.h> 43 #include <linux/of_dma.h> 44 #include <linux/of_platform.h> 45 #include <linux/of_irq.h> 46 #include <linux/slab.h> 47 #include <linux/clk.h> 48 #include <linux/io-64-nonatomic-lo-hi.h> 49 50 #include "../dmaengine.h" 51 52 /* Register/Descriptor Offsets */ 53 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 54 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 55 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 56 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 57 58 /* Control Registers */ 59 #define XILINX_DMA_REG_DMACR 0x0000 60 #define XILINX_DMA_DMACR_DELAY_MAX 0xff 61 #define XILINX_DMA_DMACR_DELAY_SHIFT 24 62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff 63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 64 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14) 65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) 66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) 67 #define XILINX_DMA_DMACR_MASTER_SHIFT 8 68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 69 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) 70 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) 71 #define XILINX_DMA_DMACR_RESET BIT(2) 72 #define XILINX_DMA_DMACR_CIRC_EN BIT(1) 73 #define XILINX_DMA_DMACR_RUNSTOP BIT(0) 74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) 75 76 #define XILINX_DMA_REG_DMASR 0x0004 77 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) 78 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14) 79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) 80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) 81 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) 82 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) 83 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) 84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) 85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) 86 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) 87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) 88 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) 89 #define XILINX_DMA_DMASR_IDLE BIT(1) 90 #define XILINX_DMA_DMASR_HALTED BIT(0) 91 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) 92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) 93 94 #define XILINX_DMA_REG_CURDESC 0x0008 95 #define XILINX_DMA_REG_TAILDESC 0x0010 96 #define XILINX_DMA_REG_REG_INDEX 0x0014 97 #define XILINX_DMA_REG_FRMSTORE 0x0018 98 #define XILINX_DMA_REG_THRESHOLD 0x001c 99 #define XILINX_DMA_REG_FRMPTR_STS 0x0024 100 #define XILINX_DMA_REG_PARK_PTR 0x0028 101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 102 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 103 #define XILINX_DMA_REG_VDMA_VERSION 0x002c 104 105 /* Register Direct Mode Registers */ 106 #define XILINX_DMA_REG_VSIZE 0x0000 107 #define XILINX_DMA_REG_HSIZE 0x0004 108 109 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 110 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 111 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 112 113 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 114 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 115 116 /* HW specific definitions */ 117 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 118 119 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 120 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ 121 XILINX_DMA_DMASR_DLY_CNT_IRQ | \ 122 XILINX_DMA_DMASR_ERR_IRQ) 123 124 #define XILINX_DMA_DMASR_ALL_ERR_MASK \ 125 (XILINX_DMA_DMASR_EOL_LATE_ERR | \ 126 XILINX_DMA_DMASR_SOF_LATE_ERR | \ 127 XILINX_DMA_DMASR_SG_DEC_ERR | \ 128 XILINX_DMA_DMASR_SG_SLV_ERR | \ 129 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 130 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 131 XILINX_DMA_DMASR_DMA_DEC_ERR | \ 132 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ 133 XILINX_DMA_DMASR_DMA_INT_ERR) 134 135 /* 136 * Recoverable errors are DMA Internal error, SOF Early, EOF Early 137 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC 138 * is enabled in the h/w system. 139 */ 140 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ 141 (XILINX_DMA_DMASR_SOF_LATE_ERR | \ 142 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 143 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 144 XILINX_DMA_DMASR_DMA_INT_ERR) 145 146 /* Axi VDMA Flush on Fsync bits */ 147 #define XILINX_DMA_FLUSH_S2MM 3 148 #define XILINX_DMA_FLUSH_MM2S 2 149 #define XILINX_DMA_FLUSH_BOTH 1 150 151 /* Delay loop counter to prevent hardware failure */ 152 #define XILINX_DMA_LOOP_COUNT 1000000 153 154 /* AXI DMA Specific Registers/Offsets */ 155 #define XILINX_DMA_REG_SRCDSTADDR 0x18 156 #define XILINX_DMA_REG_BTT 0x28 157 158 /* AXI DMA Specific Masks/Bit fields */ 159 #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) 160 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) 161 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) 162 #define XILINX_DMA_CR_COALESCE_SHIFT 16 163 #define XILINX_DMA_BD_SOP BIT(27) 164 #define XILINX_DMA_BD_EOP BIT(26) 165 #define XILINX_DMA_COALESCE_MAX 255 166 #define XILINX_DMA_NUM_APP_WORDS 5 167 168 /* Multi-Channel DMA Descriptor offsets*/ 169 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) 170 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) 171 172 /* Multi-Channel DMA Masks/Shifts */ 173 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) 174 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) 175 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) 176 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) 177 #define XILINX_DMA_BD_STRIDE_SHIFT 0 178 #define XILINX_DMA_BD_VSIZE_SHIFT 19 179 180 /* AXI CDMA Specific Registers/Offsets */ 181 #define XILINX_CDMA_REG_SRCADDR 0x18 182 #define XILINX_CDMA_REG_DSTADDR 0x20 183 184 /* AXI CDMA Specific Masks */ 185 #define XILINX_CDMA_CR_SGMODE BIT(3) 186 187 /** 188 * struct xilinx_vdma_desc_hw - Hardware Descriptor 189 * @next_desc: Next Descriptor Pointer @0x00 190 * @pad1: Reserved @0x04 191 * @buf_addr: Buffer address @0x08 192 * @buf_addr_msb: MSB of Buffer address @0x0C 193 * @vsize: Vertical Size @0x10 194 * @hsize: Horizontal Size @0x14 195 * @stride: Number of bytes between the first 196 * pixels of each horizontal line @0x18 197 */ 198 struct xilinx_vdma_desc_hw { 199 u32 next_desc; 200 u32 pad1; 201 u32 buf_addr; 202 u32 buf_addr_msb; 203 u32 vsize; 204 u32 hsize; 205 u32 stride; 206 } __aligned(64); 207 208 /** 209 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA 210 * @next_desc: Next Descriptor Pointer @0x00 211 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 212 * @buf_addr: Buffer address @0x08 213 * @buf_addr_msb: MSB of Buffer address @0x0C 214 * @pad1: Reserved @0x10 215 * @pad2: Reserved @0x14 216 * @control: Control field @0x18 217 * @status: Status field @0x1C 218 * @app: APP Fields @0x20 - 0x30 219 */ 220 struct xilinx_axidma_desc_hw { 221 u32 next_desc; 222 u32 next_desc_msb; 223 u32 buf_addr; 224 u32 buf_addr_msb; 225 u32 mcdma_control; 226 u32 vsize_stride; 227 u32 control; 228 u32 status; 229 u32 app[XILINX_DMA_NUM_APP_WORDS]; 230 } __aligned(64); 231 232 /** 233 * struct xilinx_cdma_desc_hw - Hardware Descriptor 234 * @next_desc: Next Descriptor Pointer @0x00 235 * @next_descmsb: Next Descriptor Pointer MSB @0x04 236 * @src_addr: Source address @0x08 237 * @src_addrmsb: Source address MSB @0x0C 238 * @dest_addr: Destination address @0x10 239 * @dest_addrmsb: Destination address MSB @0x14 240 * @control: Control field @0x18 241 * @status: Status field @0x1C 242 */ 243 struct xilinx_cdma_desc_hw { 244 u32 next_desc; 245 u32 next_desc_msb; 246 u32 src_addr; 247 u32 src_addr_msb; 248 u32 dest_addr; 249 u32 dest_addr_msb; 250 u32 control; 251 u32 status; 252 } __aligned(64); 253 254 /** 255 * struct xilinx_vdma_tx_segment - Descriptor segment 256 * @hw: Hardware descriptor 257 * @node: Node in the descriptor segments list 258 * @phys: Physical address of segment 259 */ 260 struct xilinx_vdma_tx_segment { 261 struct xilinx_vdma_desc_hw hw; 262 struct list_head node; 263 dma_addr_t phys; 264 } __aligned(64); 265 266 /** 267 * struct xilinx_axidma_tx_segment - Descriptor segment 268 * @hw: Hardware descriptor 269 * @node: Node in the descriptor segments list 270 * @phys: Physical address of segment 271 */ 272 struct xilinx_axidma_tx_segment { 273 struct xilinx_axidma_desc_hw hw; 274 struct list_head node; 275 dma_addr_t phys; 276 } __aligned(64); 277 278 /** 279 * struct xilinx_cdma_tx_segment - Descriptor segment 280 * @hw: Hardware descriptor 281 * @node: Node in the descriptor segments list 282 * @phys: Physical address of segment 283 */ 284 struct xilinx_cdma_tx_segment { 285 struct xilinx_cdma_desc_hw hw; 286 struct list_head node; 287 dma_addr_t phys; 288 } __aligned(64); 289 290 /** 291 * struct xilinx_dma_tx_descriptor - Per Transaction structure 292 * @async_tx: Async transaction descriptor 293 * @segments: TX segments list 294 * @node: Node in the channel descriptors list 295 * @cyclic: Check for cyclic transfers. 296 */ 297 struct xilinx_dma_tx_descriptor { 298 struct dma_async_tx_descriptor async_tx; 299 struct list_head segments; 300 struct list_head node; 301 bool cyclic; 302 }; 303 304 /** 305 * struct xilinx_dma_chan - Driver specific DMA channel structure 306 * @xdev: Driver specific device structure 307 * @ctrl_offset: Control registers offset 308 * @desc_offset: TX descriptor registers offset 309 * @lock: Descriptor operation lock 310 * @pending_list: Descriptors waiting 311 * @active_list: Descriptors ready to submit 312 * @done_list: Complete descriptors 313 * @common: DMA common channel 314 * @desc_pool: Descriptors pool 315 * @dev: The dma device 316 * @irq: Channel IRQ 317 * @id: Channel ID 318 * @direction: Transfer direction 319 * @num_frms: Number of frames 320 * @has_sg: Support scatter transfers 321 * @cyclic: Check for cyclic transfers. 322 * @genlock: Support genlock mode 323 * @err: Channel has errors 324 * @tasklet: Cleanup work after irq 325 * @config: Device configuration info 326 * @flush_on_fsync: Flush on Frame sync 327 * @desc_pendingcount: Descriptor pending count 328 * @ext_addr: Indicates 64 bit addressing is supported by dma channel 329 * @desc_submitcount: Descriptor h/w submitted count 330 * @residue: Residue for AXI DMA 331 * @seg_v: Statically allocated segments base 332 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 333 * @start_transfer: Differentiate b/w DMA IP's transfer 334 * @stop_transfer: Differentiate b/w DMA IP's quiesce 335 */ 336 struct xilinx_dma_chan { 337 struct xilinx_dma_device *xdev; 338 u32 ctrl_offset; 339 u32 desc_offset; 340 spinlock_t lock; 341 struct list_head pending_list; 342 struct list_head active_list; 343 struct list_head done_list; 344 struct dma_chan common; 345 struct dma_pool *desc_pool; 346 struct device *dev; 347 int irq; 348 int id; 349 enum dma_transfer_direction direction; 350 int num_frms; 351 bool has_sg; 352 bool cyclic; 353 bool genlock; 354 bool err; 355 struct tasklet_struct tasklet; 356 struct xilinx_vdma_config config; 357 bool flush_on_fsync; 358 u32 desc_pendingcount; 359 bool ext_addr; 360 u32 desc_submitcount; 361 u32 residue; 362 struct xilinx_axidma_tx_segment *seg_v; 363 struct xilinx_axidma_tx_segment *cyclic_seg_v; 364 void (*start_transfer)(struct xilinx_dma_chan *chan); 365 int (*stop_transfer)(struct xilinx_dma_chan *chan); 366 u16 tdest; 367 }; 368 369 /** 370 * enum xdma_ip_type: DMA IP type. 371 * 372 * XDMA_TYPE_AXIDMA: Axi dma ip. 373 * XDMA_TYPE_CDMA: Axi cdma ip. 374 * XDMA_TYPE_VDMA: Axi vdma ip. 375 * 376 */ 377 enum xdma_ip_type { 378 XDMA_TYPE_AXIDMA = 0, 379 XDMA_TYPE_CDMA, 380 XDMA_TYPE_VDMA, 381 }; 382 383 struct xilinx_dma_config { 384 enum xdma_ip_type dmatype; 385 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, 386 struct clk **tx_clk, struct clk **txs_clk, 387 struct clk **rx_clk, struct clk **rxs_clk); 388 }; 389 390 /** 391 * struct xilinx_dma_device - DMA device structure 392 * @regs: I/O mapped base address 393 * @dev: Device Structure 394 * @common: DMA device structure 395 * @chan: Driver specific DMA channel 396 * @has_sg: Specifies whether Scatter-Gather is present or not 397 * @mcdma: Specifies whether Multi-Channel is present or not 398 * @flush_on_fsync: Flush on frame sync 399 * @ext_addr: Indicates 64 bit addressing is supported by dma device 400 * @pdev: Platform device structure pointer 401 * @dma_config: DMA config structure 402 * @axi_clk: DMA Axi4-lite interace clock 403 * @tx_clk: DMA mm2s clock 404 * @txs_clk: DMA mm2s stream clock 405 * @rx_clk: DMA s2mm clock 406 * @rxs_clk: DMA s2mm stream clock 407 * @nr_channels: Number of channels DMA device supports 408 * @chan_id: DMA channel identifier 409 */ 410 struct xilinx_dma_device { 411 void __iomem *regs; 412 struct device *dev; 413 struct dma_device common; 414 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; 415 bool has_sg; 416 bool mcdma; 417 u32 flush_on_fsync; 418 bool ext_addr; 419 struct platform_device *pdev; 420 const struct xilinx_dma_config *dma_config; 421 struct clk *axi_clk; 422 struct clk *tx_clk; 423 struct clk *txs_clk; 424 struct clk *rx_clk; 425 struct clk *rxs_clk; 426 u32 nr_channels; 427 u32 chan_id; 428 }; 429 430 /* Macros */ 431 #define to_xilinx_chan(chan) \ 432 container_of(chan, struct xilinx_dma_chan, common) 433 #define to_dma_tx_descriptor(tx) \ 434 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) 435 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ 436 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ 437 cond, delay_us, timeout_us) 438 439 /* IO accessors */ 440 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) 441 { 442 return ioread32(chan->xdev->regs + reg); 443 } 444 445 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) 446 { 447 iowrite32(value, chan->xdev->regs + reg); 448 } 449 450 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, 451 u32 value) 452 { 453 dma_write(chan, chan->desc_offset + reg, value); 454 } 455 456 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) 457 { 458 return dma_read(chan, chan->ctrl_offset + reg); 459 } 460 461 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, 462 u32 value) 463 { 464 dma_write(chan, chan->ctrl_offset + reg, value); 465 } 466 467 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, 468 u32 clr) 469 { 470 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); 471 } 472 473 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, 474 u32 set) 475 { 476 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); 477 } 478 479 /** 480 * vdma_desc_write_64 - 64-bit descriptor write 481 * @chan: Driver specific VDMA channel 482 * @reg: Register to write 483 * @value_lsb: lower address of the descriptor. 484 * @value_msb: upper address of the descriptor. 485 * 486 * Since vdma driver is trying to write to a register offset which is not a 487 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits 488 * instead of a single 64 bit register write. 489 */ 490 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, 491 u32 value_lsb, u32 value_msb) 492 { 493 /* Write the lsb 32 bits*/ 494 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); 495 496 /* Write the msb 32 bits */ 497 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); 498 } 499 500 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) 501 { 502 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); 503 } 504 505 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, 506 dma_addr_t addr) 507 { 508 if (chan->ext_addr) 509 dma_writeq(chan, reg, addr); 510 else 511 dma_ctrl_write(chan, reg, addr); 512 } 513 514 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, 515 struct xilinx_axidma_desc_hw *hw, 516 dma_addr_t buf_addr, size_t sg_used, 517 size_t period_len) 518 { 519 if (chan->ext_addr) { 520 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); 521 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + 522 period_len); 523 } else { 524 hw->buf_addr = buf_addr + sg_used + period_len; 525 } 526 } 527 528 /* ----------------------------------------------------------------------------- 529 * Descriptors and segments alloc and free 530 */ 531 532 /** 533 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment 534 * @chan: Driver specific DMA channel 535 * 536 * Return: The allocated segment on success and NULL on failure. 537 */ 538 static struct xilinx_vdma_tx_segment * 539 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 540 { 541 struct xilinx_vdma_tx_segment *segment; 542 dma_addr_t phys; 543 544 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 545 if (!segment) 546 return NULL; 547 548 segment->phys = phys; 549 550 return segment; 551 } 552 553 /** 554 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment 555 * @chan: Driver specific DMA channel 556 * 557 * Return: The allocated segment on success and NULL on failure. 558 */ 559 static struct xilinx_cdma_tx_segment * 560 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 561 { 562 struct xilinx_cdma_tx_segment *segment; 563 dma_addr_t phys; 564 565 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 566 if (!segment) 567 return NULL; 568 569 segment->phys = phys; 570 571 return segment; 572 } 573 574 /** 575 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment 576 * @chan: Driver specific DMA channel 577 * 578 * Return: The allocated segment on success and NULL on failure. 579 */ 580 static struct xilinx_axidma_tx_segment * 581 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) 582 { 583 struct xilinx_axidma_tx_segment *segment; 584 dma_addr_t phys; 585 586 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 587 if (!segment) 588 return NULL; 589 590 segment->phys = phys; 591 592 return segment; 593 } 594 595 /** 596 * xilinx_dma_free_tx_segment - Free transaction segment 597 * @chan: Driver specific DMA channel 598 * @segment: DMA transaction segment 599 */ 600 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, 601 struct xilinx_axidma_tx_segment *segment) 602 { 603 dma_pool_free(chan->desc_pool, segment, segment->phys); 604 } 605 606 /** 607 * xilinx_cdma_free_tx_segment - Free transaction segment 608 * @chan: Driver specific DMA channel 609 * @segment: DMA transaction segment 610 */ 611 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, 612 struct xilinx_cdma_tx_segment *segment) 613 { 614 dma_pool_free(chan->desc_pool, segment, segment->phys); 615 } 616 617 /** 618 * xilinx_vdma_free_tx_segment - Free transaction segment 619 * @chan: Driver specific DMA channel 620 * @segment: DMA transaction segment 621 */ 622 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, 623 struct xilinx_vdma_tx_segment *segment) 624 { 625 dma_pool_free(chan->desc_pool, segment, segment->phys); 626 } 627 628 /** 629 * xilinx_dma_tx_descriptor - Allocate transaction descriptor 630 * @chan: Driver specific DMA channel 631 * 632 * Return: The allocated descriptor on success and NULL on failure. 633 */ 634 static struct xilinx_dma_tx_descriptor * 635 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) 636 { 637 struct xilinx_dma_tx_descriptor *desc; 638 639 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 640 if (!desc) 641 return NULL; 642 643 INIT_LIST_HEAD(&desc->segments); 644 645 return desc; 646 } 647 648 /** 649 * xilinx_dma_free_tx_descriptor - Free transaction descriptor 650 * @chan: Driver specific DMA channel 651 * @desc: DMA transaction descriptor 652 */ 653 static void 654 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, 655 struct xilinx_dma_tx_descriptor *desc) 656 { 657 struct xilinx_vdma_tx_segment *segment, *next; 658 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 659 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 660 661 if (!desc) 662 return; 663 664 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 665 list_for_each_entry_safe(segment, next, &desc->segments, node) { 666 list_del(&segment->node); 667 xilinx_vdma_free_tx_segment(chan, segment); 668 } 669 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 670 list_for_each_entry_safe(cdma_segment, cdma_next, 671 &desc->segments, node) { 672 list_del(&cdma_segment->node); 673 xilinx_cdma_free_tx_segment(chan, cdma_segment); 674 } 675 } else { 676 list_for_each_entry_safe(axidma_segment, axidma_next, 677 &desc->segments, node) { 678 list_del(&axidma_segment->node); 679 xilinx_dma_free_tx_segment(chan, axidma_segment); 680 } 681 } 682 683 kfree(desc); 684 } 685 686 /* Required functions */ 687 688 /** 689 * xilinx_dma_free_desc_list - Free descriptors list 690 * @chan: Driver specific DMA channel 691 * @list: List to parse and delete the descriptor 692 */ 693 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, 694 struct list_head *list) 695 { 696 struct xilinx_dma_tx_descriptor *desc, *next; 697 698 list_for_each_entry_safe(desc, next, list, node) { 699 list_del(&desc->node); 700 xilinx_dma_free_tx_descriptor(chan, desc); 701 } 702 } 703 704 /** 705 * xilinx_dma_free_descriptors - Free channel descriptors 706 * @chan: Driver specific DMA channel 707 */ 708 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) 709 { 710 unsigned long flags; 711 712 spin_lock_irqsave(&chan->lock, flags); 713 714 xilinx_dma_free_desc_list(chan, &chan->pending_list); 715 xilinx_dma_free_desc_list(chan, &chan->done_list); 716 xilinx_dma_free_desc_list(chan, &chan->active_list); 717 718 spin_unlock_irqrestore(&chan->lock, flags); 719 } 720 721 /** 722 * xilinx_dma_free_chan_resources - Free channel resources 723 * @dchan: DMA channel 724 */ 725 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 726 { 727 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 728 729 dev_dbg(chan->dev, "Free all channel resources.\n"); 730 731 xilinx_dma_free_descriptors(chan); 732 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 733 xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); 734 xilinx_dma_free_tx_segment(chan, chan->seg_v); 735 } 736 dma_pool_destroy(chan->desc_pool); 737 chan->desc_pool = NULL; 738 } 739 740 /** 741 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback 742 * @chan: Driver specific dma channel 743 * @desc: dma transaction descriptor 744 * @flags: flags for spin lock 745 */ 746 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, 747 struct xilinx_dma_tx_descriptor *desc, 748 unsigned long *flags) 749 { 750 dma_async_tx_callback callback; 751 void *callback_param; 752 753 callback = desc->async_tx.callback; 754 callback_param = desc->async_tx.callback_param; 755 if (callback) { 756 spin_unlock_irqrestore(&chan->lock, *flags); 757 callback(callback_param); 758 spin_lock_irqsave(&chan->lock, *flags); 759 } 760 } 761 762 /** 763 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 764 * @chan: Driver specific DMA channel 765 */ 766 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) 767 { 768 struct xilinx_dma_tx_descriptor *desc, *next; 769 unsigned long flags; 770 771 spin_lock_irqsave(&chan->lock, flags); 772 773 list_for_each_entry_safe(desc, next, &chan->done_list, node) { 774 struct dmaengine_desc_callback cb; 775 776 if (desc->cyclic) { 777 xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 778 break; 779 } 780 781 /* Remove from the list of running transactions */ 782 list_del(&desc->node); 783 784 /* Run the link descriptor callback function */ 785 dmaengine_desc_get_callback(&desc->async_tx, &cb); 786 if (dmaengine_desc_callback_valid(&cb)) { 787 spin_unlock_irqrestore(&chan->lock, flags); 788 dmaengine_desc_callback_invoke(&cb, NULL); 789 spin_lock_irqsave(&chan->lock, flags); 790 } 791 792 /* Run any dependencies, then free the descriptor */ 793 dma_run_dependencies(&desc->async_tx); 794 xilinx_dma_free_tx_descriptor(chan, desc); 795 } 796 797 spin_unlock_irqrestore(&chan->lock, flags); 798 } 799 800 /** 801 * xilinx_dma_do_tasklet - Schedule completion tasklet 802 * @data: Pointer to the Xilinx DMA channel structure 803 */ 804 static void xilinx_dma_do_tasklet(unsigned long data) 805 { 806 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; 807 808 xilinx_dma_chan_desc_cleanup(chan); 809 } 810 811 /** 812 * xilinx_dma_alloc_chan_resources - Allocate channel resources 813 * @dchan: DMA channel 814 * 815 * Return: '0' on success and failure value on error 816 */ 817 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 818 { 819 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 820 821 /* Has this channel already been allocated? */ 822 if (chan->desc_pool) 823 return 0; 824 825 /* 826 * We need the descriptor to be aligned to 64bytes 827 * for meeting Xilinx VDMA specification requirement. 828 */ 829 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 830 chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", 831 chan->dev, 832 sizeof(struct xilinx_axidma_tx_segment), 833 __alignof__(struct xilinx_axidma_tx_segment), 834 0); 835 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 836 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 837 chan->dev, 838 sizeof(struct xilinx_cdma_tx_segment), 839 __alignof__(struct xilinx_cdma_tx_segment), 840 0); 841 } else { 842 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 843 chan->dev, 844 sizeof(struct xilinx_vdma_tx_segment), 845 __alignof__(struct xilinx_vdma_tx_segment), 846 0); 847 } 848 849 if (!chan->desc_pool) { 850 dev_err(chan->dev, 851 "unable to allocate channel %d descriptor pool\n", 852 chan->id); 853 return -ENOMEM; 854 } 855 856 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 857 /* 858 * For AXI DMA case after submitting a pending_list, keep 859 * an extra segment allocated so that the "next descriptor" 860 * pointer on the tail descriptor always points to a 861 * valid descriptor, even when paused after reaching taildesc. 862 * This way, it is possible to issue additional 863 * transfers without halting and restarting the channel. 864 */ 865 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); 866 867 /* 868 * For cyclic DMA mode we need to program the tail Descriptor 869 * register with a value which is not a part of the BD chain 870 * so allocating a desc segment during channel allocation for 871 * programming tail descriptor. 872 */ 873 chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); 874 } 875 876 dma_cookie_init(dchan); 877 878 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 879 /* For AXI DMA resetting once channel will reset the 880 * other channel as well so enable the interrupts here. 881 */ 882 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 883 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 884 } 885 886 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 887 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 888 XILINX_CDMA_CR_SGMODE); 889 890 return 0; 891 } 892 893 /** 894 * xilinx_dma_tx_status - Get DMA transaction status 895 * @dchan: DMA channel 896 * @cookie: Transaction identifier 897 * @txstate: Transaction state 898 * 899 * Return: DMA transaction status 900 */ 901 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, 902 dma_cookie_t cookie, 903 struct dma_tx_state *txstate) 904 { 905 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 906 struct xilinx_dma_tx_descriptor *desc; 907 struct xilinx_axidma_tx_segment *segment; 908 struct xilinx_axidma_desc_hw *hw; 909 enum dma_status ret; 910 unsigned long flags; 911 u32 residue = 0; 912 913 ret = dma_cookie_status(dchan, cookie, txstate); 914 if (ret == DMA_COMPLETE || !txstate) 915 return ret; 916 917 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 918 spin_lock_irqsave(&chan->lock, flags); 919 920 desc = list_last_entry(&chan->active_list, 921 struct xilinx_dma_tx_descriptor, node); 922 if (chan->has_sg) { 923 list_for_each_entry(segment, &desc->segments, node) { 924 hw = &segment->hw; 925 residue += (hw->control - hw->status) & 926 XILINX_DMA_MAX_TRANS_LEN; 927 } 928 } 929 spin_unlock_irqrestore(&chan->lock, flags); 930 931 chan->residue = residue; 932 dma_set_residue(txstate, chan->residue); 933 } 934 935 return ret; 936 } 937 938 /** 939 * xilinx_dma_is_running - Check if DMA channel is running 940 * @chan: Driver specific DMA channel 941 * 942 * Return: '1' if running, '0' if not. 943 */ 944 static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) 945 { 946 return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 947 XILINX_DMA_DMASR_HALTED) && 948 (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & 949 XILINX_DMA_DMACR_RUNSTOP); 950 } 951 952 /** 953 * xilinx_dma_is_idle - Check if DMA channel is idle 954 * @chan: Driver specific DMA channel 955 * 956 * Return: '1' if idle, '0' if not. 957 */ 958 static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) 959 { 960 return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 961 XILINX_DMA_DMASR_IDLE; 962 } 963 964 /** 965 * xilinx_dma_stop_transfer - Halt DMA channel 966 * @chan: Driver specific DMA channel 967 */ 968 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) 969 { 970 u32 val; 971 972 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 973 974 /* Wait for the hardware to halt */ 975 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 976 val & XILINX_DMA_DMASR_HALTED, 0, 977 XILINX_DMA_LOOP_COUNT); 978 } 979 980 /** 981 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete 982 * @chan: Driver specific DMA channel 983 */ 984 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) 985 { 986 u32 val; 987 988 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 989 val & XILINX_DMA_DMASR_IDLE, 0, 990 XILINX_DMA_LOOP_COUNT); 991 } 992 993 /** 994 * xilinx_dma_start - Start DMA channel 995 * @chan: Driver specific DMA channel 996 */ 997 static void xilinx_dma_start(struct xilinx_dma_chan *chan) 998 { 999 int err; 1000 u32 val; 1001 1002 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 1003 1004 /* Wait for the hardware to start */ 1005 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1006 !(val & XILINX_DMA_DMASR_HALTED), 0, 1007 XILINX_DMA_LOOP_COUNT); 1008 1009 if (err) { 1010 dev_err(chan->dev, "Cannot start channel %p: %x\n", 1011 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1012 1013 chan->err = true; 1014 } 1015 } 1016 1017 /** 1018 * xilinx_vdma_start_transfer - Starts VDMA transfer 1019 * @chan: Driver specific channel struct pointer 1020 */ 1021 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 1022 { 1023 struct xilinx_vdma_config *config = &chan->config; 1024 struct xilinx_dma_tx_descriptor *desc, *tail_desc; 1025 u32 reg; 1026 struct xilinx_vdma_tx_segment *tail_segment; 1027 1028 /* This function was invoked with lock held */ 1029 if (chan->err) 1030 return; 1031 1032 if (list_empty(&chan->pending_list)) 1033 return; 1034 1035 desc = list_first_entry(&chan->pending_list, 1036 struct xilinx_dma_tx_descriptor, node); 1037 tail_desc = list_last_entry(&chan->pending_list, 1038 struct xilinx_dma_tx_descriptor, node); 1039 1040 tail_segment = list_last_entry(&tail_desc->segments, 1041 struct xilinx_vdma_tx_segment, node); 1042 1043 /* If it is SG mode and hardware is busy, cannot submit */ 1044 if (chan->has_sg && xilinx_dma_is_running(chan) && 1045 !xilinx_dma_is_idle(chan)) { 1046 dev_dbg(chan->dev, "DMA controller still busy\n"); 1047 return; 1048 } 1049 1050 /* 1051 * If hardware is idle, then all descriptors on the running lists are 1052 * done, start new transfers 1053 */ 1054 if (chan->has_sg) 1055 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1056 desc->async_tx.phys); 1057 1058 /* Configure the hardware using info in the config structure */ 1059 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1060 1061 if (config->frm_cnt_en) 1062 reg |= XILINX_DMA_DMACR_FRAMECNT_EN; 1063 else 1064 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 1065 1066 /* Configure channel to allow number frame buffers */ 1067 dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, 1068 chan->desc_pendingcount); 1069 1070 /* 1071 * With SG, start with circular mode, so that BDs can be fetched. 1072 * In direct register mode, if not parking, enable circular mode 1073 */ 1074 if (chan->has_sg || !config->park) 1075 reg |= XILINX_DMA_DMACR_CIRC_EN; 1076 1077 if (config->park) 1078 reg &= ~XILINX_DMA_DMACR_CIRC_EN; 1079 1080 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1081 1082 if (config->park && (config->park_frm >= 0) && 1083 (config->park_frm < chan->num_frms)) { 1084 if (chan->direction == DMA_MEM_TO_DEV) 1085 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1086 config->park_frm << 1087 XILINX_DMA_PARK_PTR_RD_REF_SHIFT); 1088 else 1089 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1090 config->park_frm << 1091 XILINX_DMA_PARK_PTR_WR_REF_SHIFT); 1092 } 1093 1094 /* Start the hardware */ 1095 xilinx_dma_start(chan); 1096 1097 if (chan->err) 1098 return; 1099 1100 /* Start the transfer */ 1101 if (chan->has_sg) { 1102 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1103 tail_segment->phys); 1104 } else { 1105 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1106 int i = 0; 1107 1108 if (chan->desc_submitcount < chan->num_frms) 1109 i = chan->desc_submitcount; 1110 1111 list_for_each_entry(segment, &desc->segments, node) { 1112 if (chan->ext_addr) 1113 vdma_desc_write_64(chan, 1114 XILINX_VDMA_REG_START_ADDRESS_64(i++), 1115 segment->hw.buf_addr, 1116 segment->hw.buf_addr_msb); 1117 else 1118 vdma_desc_write(chan, 1119 XILINX_VDMA_REG_START_ADDRESS(i++), 1120 segment->hw.buf_addr); 1121 1122 last = segment; 1123 } 1124 1125 if (!last) 1126 return; 1127 1128 /* HW expects these parameters to be same for one transaction */ 1129 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); 1130 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 1131 last->hw.stride); 1132 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 1133 } 1134 1135 if (!chan->has_sg) { 1136 list_del(&desc->node); 1137 list_add_tail(&desc->node, &chan->active_list); 1138 chan->desc_submitcount++; 1139 chan->desc_pendingcount--; 1140 if (chan->desc_submitcount == chan->num_frms) 1141 chan->desc_submitcount = 0; 1142 } else { 1143 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1144 chan->desc_pendingcount = 0; 1145 } 1146 } 1147 1148 /** 1149 * xilinx_cdma_start_transfer - Starts cdma transfer 1150 * @chan: Driver specific channel struct pointer 1151 */ 1152 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) 1153 { 1154 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1155 struct xilinx_cdma_tx_segment *tail_segment; 1156 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); 1157 1158 if (chan->err) 1159 return; 1160 1161 if (list_empty(&chan->pending_list)) 1162 return; 1163 1164 head_desc = list_first_entry(&chan->pending_list, 1165 struct xilinx_dma_tx_descriptor, node); 1166 tail_desc = list_last_entry(&chan->pending_list, 1167 struct xilinx_dma_tx_descriptor, node); 1168 tail_segment = list_last_entry(&tail_desc->segments, 1169 struct xilinx_cdma_tx_segment, node); 1170 1171 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1172 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1173 ctrl_reg |= chan->desc_pendingcount << 1174 XILINX_DMA_CR_COALESCE_SHIFT; 1175 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); 1176 } 1177 1178 if (chan->has_sg) { 1179 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1180 head_desc->async_tx.phys); 1181 1182 /* Update tail ptr register which will start the transfer */ 1183 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1184 tail_segment->phys); 1185 } else { 1186 /* In simple mode */ 1187 struct xilinx_cdma_tx_segment *segment; 1188 struct xilinx_cdma_desc_hw *hw; 1189 1190 segment = list_first_entry(&head_desc->segments, 1191 struct xilinx_cdma_tx_segment, 1192 node); 1193 1194 hw = &segment->hw; 1195 1196 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); 1197 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); 1198 1199 /* Start the transfer */ 1200 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1201 hw->control & XILINX_DMA_MAX_TRANS_LEN); 1202 } 1203 1204 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1205 chan->desc_pendingcount = 0; 1206 } 1207 1208 /** 1209 * xilinx_dma_start_transfer - Starts DMA transfer 1210 * @chan: Driver specific channel struct pointer 1211 */ 1212 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) 1213 { 1214 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1215 struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; 1216 u32 reg; 1217 1218 if (chan->err) 1219 return; 1220 1221 if (list_empty(&chan->pending_list)) 1222 return; 1223 1224 /* If it is SG mode and hardware is busy, cannot submit */ 1225 if (chan->has_sg && xilinx_dma_is_running(chan) && 1226 !xilinx_dma_is_idle(chan)) { 1227 dev_dbg(chan->dev, "DMA controller still busy\n"); 1228 return; 1229 } 1230 1231 head_desc = list_first_entry(&chan->pending_list, 1232 struct xilinx_dma_tx_descriptor, node); 1233 tail_desc = list_last_entry(&chan->pending_list, 1234 struct xilinx_dma_tx_descriptor, node); 1235 tail_segment = list_last_entry(&tail_desc->segments, 1236 struct xilinx_axidma_tx_segment, node); 1237 1238 if (chan->has_sg && !chan->xdev->mcdma) { 1239 old_head = list_first_entry(&head_desc->segments, 1240 struct xilinx_axidma_tx_segment, node); 1241 new_head = chan->seg_v; 1242 /* Copy Buffer Descriptor fields. */ 1243 new_head->hw = old_head->hw; 1244 1245 /* Swap and save new reserve */ 1246 list_replace_init(&old_head->node, &new_head->node); 1247 chan->seg_v = old_head; 1248 1249 tail_segment->hw.next_desc = chan->seg_v->phys; 1250 head_desc->async_tx.phys = new_head->phys; 1251 } 1252 1253 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1254 1255 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1256 reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1257 reg |= chan->desc_pendingcount << 1258 XILINX_DMA_CR_COALESCE_SHIFT; 1259 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1260 } 1261 1262 if (chan->has_sg && !chan->xdev->mcdma) 1263 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1264 head_desc->async_tx.phys); 1265 1266 if (chan->has_sg && chan->xdev->mcdma) { 1267 if (chan->direction == DMA_MEM_TO_DEV) { 1268 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1269 head_desc->async_tx.phys); 1270 } else { 1271 if (!chan->tdest) { 1272 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1273 head_desc->async_tx.phys); 1274 } else { 1275 dma_ctrl_write(chan, 1276 XILINX_DMA_MCRX_CDESC(chan->tdest), 1277 head_desc->async_tx.phys); 1278 } 1279 } 1280 } 1281 1282 xilinx_dma_start(chan); 1283 1284 if (chan->err) 1285 return; 1286 1287 /* Start the transfer */ 1288 if (chan->has_sg && !chan->xdev->mcdma) { 1289 if (chan->cyclic) 1290 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1291 chan->cyclic_seg_v->phys); 1292 else 1293 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1294 tail_segment->phys); 1295 } else if (chan->has_sg && chan->xdev->mcdma) { 1296 if (chan->direction == DMA_MEM_TO_DEV) { 1297 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1298 tail_segment->phys); 1299 } else { 1300 if (!chan->tdest) { 1301 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1302 tail_segment->phys); 1303 } else { 1304 dma_ctrl_write(chan, 1305 XILINX_DMA_MCRX_TDESC(chan->tdest), 1306 tail_segment->phys); 1307 } 1308 } 1309 } else { 1310 struct xilinx_axidma_tx_segment *segment; 1311 struct xilinx_axidma_desc_hw *hw; 1312 1313 segment = list_first_entry(&head_desc->segments, 1314 struct xilinx_axidma_tx_segment, 1315 node); 1316 hw = &segment->hw; 1317 1318 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); 1319 1320 /* Start the transfer */ 1321 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1322 hw->control & XILINX_DMA_MAX_TRANS_LEN); 1323 } 1324 1325 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1326 chan->desc_pendingcount = 0; 1327 } 1328 1329 /** 1330 * xilinx_dma_issue_pending - Issue pending transactions 1331 * @dchan: DMA channel 1332 */ 1333 static void xilinx_dma_issue_pending(struct dma_chan *dchan) 1334 { 1335 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1336 unsigned long flags; 1337 1338 spin_lock_irqsave(&chan->lock, flags); 1339 chan->start_transfer(chan); 1340 spin_unlock_irqrestore(&chan->lock, flags); 1341 } 1342 1343 /** 1344 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete 1345 * @chan : xilinx DMA channel 1346 * 1347 * CONTEXT: hardirq 1348 */ 1349 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) 1350 { 1351 struct xilinx_dma_tx_descriptor *desc, *next; 1352 1353 /* This function was invoked with lock held */ 1354 if (list_empty(&chan->active_list)) 1355 return; 1356 1357 list_for_each_entry_safe(desc, next, &chan->active_list, node) { 1358 list_del(&desc->node); 1359 if (!desc->cyclic) 1360 dma_cookie_complete(&desc->async_tx); 1361 list_add_tail(&desc->node, &chan->done_list); 1362 } 1363 } 1364 1365 /** 1366 * xilinx_dma_reset - Reset DMA channel 1367 * @chan: Driver specific DMA channel 1368 * 1369 * Return: '0' on success and failure value on error 1370 */ 1371 static int xilinx_dma_reset(struct xilinx_dma_chan *chan) 1372 { 1373 int err; 1374 u32 tmp; 1375 1376 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); 1377 1378 /* Wait for the hardware to finish reset */ 1379 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, 1380 !(tmp & XILINX_DMA_DMACR_RESET), 0, 1381 XILINX_DMA_LOOP_COUNT); 1382 1383 if (err) { 1384 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 1385 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), 1386 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1387 return -ETIMEDOUT; 1388 } 1389 1390 chan->err = false; 1391 1392 return err; 1393 } 1394 1395 /** 1396 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts 1397 * @chan: Driver specific DMA channel 1398 * 1399 * Return: '0' on success and failure value on error 1400 */ 1401 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) 1402 { 1403 int err; 1404 1405 /* Reset VDMA */ 1406 err = xilinx_dma_reset(chan); 1407 if (err) 1408 return err; 1409 1410 /* Enable interrupts */ 1411 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1412 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1413 1414 return 0; 1415 } 1416 1417 /** 1418 * xilinx_dma_irq_handler - DMA Interrupt handler 1419 * @irq: IRQ number 1420 * @data: Pointer to the Xilinx DMA channel structure 1421 * 1422 * Return: IRQ_HANDLED/IRQ_NONE 1423 */ 1424 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) 1425 { 1426 struct xilinx_dma_chan *chan = data; 1427 u32 status; 1428 1429 /* Read the status and ack the interrupts. */ 1430 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); 1431 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) 1432 return IRQ_NONE; 1433 1434 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1435 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1436 1437 if (status & XILINX_DMA_DMASR_ERR_IRQ) { 1438 /* 1439 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 1440 * error is recoverable, ignore it. Otherwise flag the error. 1441 * 1442 * Only recoverable errors can be cleared in the DMASR register, 1443 * make sure not to write to other error bits to 1. 1444 */ 1445 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; 1446 1447 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1448 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); 1449 1450 if (!chan->flush_on_fsync || 1451 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { 1452 dev_err(chan->dev, 1453 "Channel %p has errors %x, cdr %x tdr %x\n", 1454 chan, errors, 1455 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), 1456 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); 1457 chan->err = true; 1458 } 1459 } 1460 1461 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { 1462 /* 1463 * Device takes too long to do the transfer when user requires 1464 * responsiveness. 1465 */ 1466 dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1467 } 1468 1469 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { 1470 spin_lock(&chan->lock); 1471 xilinx_dma_complete_descriptor(chan); 1472 chan->start_transfer(chan); 1473 spin_unlock(&chan->lock); 1474 } 1475 1476 tasklet_schedule(&chan->tasklet); 1477 return IRQ_HANDLED; 1478 } 1479 1480 /** 1481 * append_desc_queue - Queuing descriptor 1482 * @chan: Driver specific dma channel 1483 * @desc: dma transaction descriptor 1484 */ 1485 static void append_desc_queue(struct xilinx_dma_chan *chan, 1486 struct xilinx_dma_tx_descriptor *desc) 1487 { 1488 struct xilinx_vdma_tx_segment *tail_segment; 1489 struct xilinx_dma_tx_descriptor *tail_desc; 1490 struct xilinx_axidma_tx_segment *axidma_tail_segment; 1491 struct xilinx_cdma_tx_segment *cdma_tail_segment; 1492 1493 if (list_empty(&chan->pending_list)) 1494 goto append; 1495 1496 /* 1497 * Add the hardware descriptor to the chain of hardware descriptors 1498 * that already exists in memory. 1499 */ 1500 tail_desc = list_last_entry(&chan->pending_list, 1501 struct xilinx_dma_tx_descriptor, node); 1502 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 1503 tail_segment = list_last_entry(&tail_desc->segments, 1504 struct xilinx_vdma_tx_segment, 1505 node); 1506 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1507 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1508 cdma_tail_segment = list_last_entry(&tail_desc->segments, 1509 struct xilinx_cdma_tx_segment, 1510 node); 1511 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1512 } else { 1513 axidma_tail_segment = list_last_entry(&tail_desc->segments, 1514 struct xilinx_axidma_tx_segment, 1515 node); 1516 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1517 } 1518 1519 /* 1520 * Add the software descriptor and all children to the list 1521 * of pending transactions 1522 */ 1523 append: 1524 list_add_tail(&desc->node, &chan->pending_list); 1525 chan->desc_pendingcount++; 1526 1527 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) 1528 && unlikely(chan->desc_pendingcount > chan->num_frms)) { 1529 dev_dbg(chan->dev, "desc pendingcount is too high\n"); 1530 chan->desc_pendingcount = chan->num_frms; 1531 } 1532 } 1533 1534 /** 1535 * xilinx_dma_tx_submit - Submit DMA transaction 1536 * @tx: Async transaction descriptor 1537 * 1538 * Return: cookie value on success and failure value on error 1539 */ 1540 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 1541 { 1542 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); 1543 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); 1544 dma_cookie_t cookie; 1545 unsigned long flags; 1546 int err; 1547 1548 if (chan->cyclic) { 1549 xilinx_dma_free_tx_descriptor(chan, desc); 1550 return -EBUSY; 1551 } 1552 1553 if (chan->err) { 1554 /* 1555 * If reset fails, need to hard reset the system. 1556 * Channel is no longer functional 1557 */ 1558 err = xilinx_dma_chan_reset(chan); 1559 if (err < 0) 1560 return err; 1561 } 1562 1563 spin_lock_irqsave(&chan->lock, flags); 1564 1565 cookie = dma_cookie_assign(tx); 1566 1567 /* Put this transaction onto the tail of the pending queue */ 1568 append_desc_queue(chan, desc); 1569 1570 if (desc->cyclic) 1571 chan->cyclic = true; 1572 1573 spin_unlock_irqrestore(&chan->lock, flags); 1574 1575 return cookie; 1576 } 1577 1578 /** 1579 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a 1580 * DMA_SLAVE transaction 1581 * @dchan: DMA channel 1582 * @xt: Interleaved template pointer 1583 * @flags: transfer ack flags 1584 * 1585 * Return: Async transaction descriptor on success and NULL on failure 1586 */ 1587 static struct dma_async_tx_descriptor * 1588 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, 1589 struct dma_interleaved_template *xt, 1590 unsigned long flags) 1591 { 1592 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1593 struct xilinx_dma_tx_descriptor *desc; 1594 struct xilinx_vdma_tx_segment *segment, *prev = NULL; 1595 struct xilinx_vdma_desc_hw *hw; 1596 1597 if (!is_slave_direction(xt->dir)) 1598 return NULL; 1599 1600 if (!xt->numf || !xt->sgl[0].size) 1601 return NULL; 1602 1603 if (xt->frame_size != 1) 1604 return NULL; 1605 1606 /* Allocate a transaction descriptor. */ 1607 desc = xilinx_dma_alloc_tx_descriptor(chan); 1608 if (!desc) 1609 return NULL; 1610 1611 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1612 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1613 async_tx_ack(&desc->async_tx); 1614 1615 /* Allocate the link descriptor from DMA pool */ 1616 segment = xilinx_vdma_alloc_tx_segment(chan); 1617 if (!segment) 1618 goto error; 1619 1620 /* Fill in the hardware descriptor */ 1621 hw = &segment->hw; 1622 hw->vsize = xt->numf; 1623 hw->hsize = xt->sgl[0].size; 1624 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 1625 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; 1626 hw->stride |= chan->config.frm_dly << 1627 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 1628 1629 if (xt->dir != DMA_MEM_TO_DEV) { 1630 if (chan->ext_addr) { 1631 hw->buf_addr = lower_32_bits(xt->dst_start); 1632 hw->buf_addr_msb = upper_32_bits(xt->dst_start); 1633 } else { 1634 hw->buf_addr = xt->dst_start; 1635 } 1636 } else { 1637 if (chan->ext_addr) { 1638 hw->buf_addr = lower_32_bits(xt->src_start); 1639 hw->buf_addr_msb = upper_32_bits(xt->src_start); 1640 } else { 1641 hw->buf_addr = xt->src_start; 1642 } 1643 } 1644 1645 /* Insert the segment into the descriptor segments list. */ 1646 list_add_tail(&segment->node, &desc->segments); 1647 1648 prev = segment; 1649 1650 /* Link the last hardware descriptor with the first. */ 1651 segment = list_first_entry(&desc->segments, 1652 struct xilinx_vdma_tx_segment, node); 1653 desc->async_tx.phys = segment->phys; 1654 1655 return &desc->async_tx; 1656 1657 error: 1658 xilinx_dma_free_tx_descriptor(chan, desc); 1659 return NULL; 1660 } 1661 1662 /** 1663 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction 1664 * @dchan: DMA channel 1665 * @dma_dst: destination address 1666 * @dma_src: source address 1667 * @len: transfer length 1668 * @flags: transfer ack flags 1669 * 1670 * Return: Async transaction descriptor on success and NULL on failure 1671 */ 1672 static struct dma_async_tx_descriptor * 1673 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, 1674 dma_addr_t dma_src, size_t len, unsigned long flags) 1675 { 1676 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1677 struct xilinx_dma_tx_descriptor *desc; 1678 struct xilinx_cdma_tx_segment *segment; 1679 struct xilinx_cdma_desc_hw *hw; 1680 1681 if (!len || len > XILINX_DMA_MAX_TRANS_LEN) 1682 return NULL; 1683 1684 desc = xilinx_dma_alloc_tx_descriptor(chan); 1685 if (!desc) 1686 return NULL; 1687 1688 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1689 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1690 1691 /* Allocate the link descriptor from DMA pool */ 1692 segment = xilinx_cdma_alloc_tx_segment(chan); 1693 if (!segment) 1694 goto error; 1695 1696 hw = &segment->hw; 1697 hw->control = len; 1698 hw->src_addr = dma_src; 1699 hw->dest_addr = dma_dst; 1700 if (chan->ext_addr) { 1701 hw->src_addr_msb = upper_32_bits(dma_src); 1702 hw->dest_addr_msb = upper_32_bits(dma_dst); 1703 } 1704 1705 /* Insert the segment into the descriptor segments list. */ 1706 list_add_tail(&segment->node, &desc->segments); 1707 1708 desc->async_tx.phys = segment->phys; 1709 hw->next_desc = segment->phys; 1710 1711 return &desc->async_tx; 1712 1713 error: 1714 xilinx_dma_free_tx_descriptor(chan, desc); 1715 return NULL; 1716 } 1717 1718 /** 1719 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 1720 * @dchan: DMA channel 1721 * @sgl: scatterlist to transfer to/from 1722 * @sg_len: number of entries in @scatterlist 1723 * @direction: DMA direction 1724 * @flags: transfer ack flags 1725 * @context: APP words of the descriptor 1726 * 1727 * Return: Async transaction descriptor on success and NULL on failure 1728 */ 1729 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( 1730 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 1731 enum dma_transfer_direction direction, unsigned long flags, 1732 void *context) 1733 { 1734 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1735 struct xilinx_dma_tx_descriptor *desc; 1736 struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; 1737 u32 *app_w = (u32 *)context; 1738 struct scatterlist *sg; 1739 size_t copy; 1740 size_t sg_used; 1741 unsigned int i; 1742 1743 if (!is_slave_direction(direction)) 1744 return NULL; 1745 1746 /* Allocate a transaction descriptor. */ 1747 desc = xilinx_dma_alloc_tx_descriptor(chan); 1748 if (!desc) 1749 return NULL; 1750 1751 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1752 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1753 1754 /* Build transactions using information in the scatter gather list */ 1755 for_each_sg(sgl, sg, sg_len, i) { 1756 sg_used = 0; 1757 1758 /* Loop until the entire scatterlist entry is used */ 1759 while (sg_used < sg_dma_len(sg)) { 1760 struct xilinx_axidma_desc_hw *hw; 1761 1762 /* Get a free segment */ 1763 segment = xilinx_axidma_alloc_tx_segment(chan); 1764 if (!segment) 1765 goto error; 1766 1767 /* 1768 * Calculate the maximum number of bytes to transfer, 1769 * making sure it is less than the hw limit 1770 */ 1771 copy = min_t(size_t, sg_dma_len(sg) - sg_used, 1772 XILINX_DMA_MAX_TRANS_LEN); 1773 hw = &segment->hw; 1774 1775 /* Fill in the descriptor */ 1776 xilinx_axidma_buf(chan, hw, sg_dma_address(sg), 1777 sg_used, 0); 1778 1779 hw->control = copy; 1780 1781 if (chan->direction == DMA_MEM_TO_DEV) { 1782 if (app_w) 1783 memcpy(hw->app, app_w, sizeof(u32) * 1784 XILINX_DMA_NUM_APP_WORDS); 1785 } 1786 1787 if (prev) 1788 prev->hw.next_desc = segment->phys; 1789 1790 prev = segment; 1791 sg_used += copy; 1792 1793 /* 1794 * Insert the segment into the descriptor segments 1795 * list. 1796 */ 1797 list_add_tail(&segment->node, &desc->segments); 1798 } 1799 } 1800 1801 segment = list_first_entry(&desc->segments, 1802 struct xilinx_axidma_tx_segment, node); 1803 desc->async_tx.phys = segment->phys; 1804 prev->hw.next_desc = segment->phys; 1805 1806 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1807 if (chan->direction == DMA_MEM_TO_DEV) { 1808 segment->hw.control |= XILINX_DMA_BD_SOP; 1809 segment = list_last_entry(&desc->segments, 1810 struct xilinx_axidma_tx_segment, 1811 node); 1812 segment->hw.control |= XILINX_DMA_BD_EOP; 1813 } 1814 1815 return &desc->async_tx; 1816 1817 error: 1818 xilinx_dma_free_tx_descriptor(chan, desc); 1819 return NULL; 1820 } 1821 1822 /** 1823 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 1824 * @chan: DMA channel 1825 * @sgl: scatterlist to transfer to/from 1826 * @sg_len: number of entries in @scatterlist 1827 * @direction: DMA direction 1828 * @flags: transfer ack flags 1829 */ 1830 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 1831 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 1832 size_t period_len, enum dma_transfer_direction direction, 1833 unsigned long flags) 1834 { 1835 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1836 struct xilinx_dma_tx_descriptor *desc; 1837 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; 1838 size_t copy, sg_used; 1839 unsigned int num_periods; 1840 int i; 1841 u32 reg; 1842 1843 if (!period_len) 1844 return NULL; 1845 1846 num_periods = buf_len / period_len; 1847 1848 if (!num_periods) 1849 return NULL; 1850 1851 if (!is_slave_direction(direction)) 1852 return NULL; 1853 1854 /* Allocate a transaction descriptor. */ 1855 desc = xilinx_dma_alloc_tx_descriptor(chan); 1856 if (!desc) 1857 return NULL; 1858 1859 chan->direction = direction; 1860 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1861 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1862 1863 for (i = 0; i < num_periods; ++i) { 1864 sg_used = 0; 1865 1866 while (sg_used < period_len) { 1867 struct xilinx_axidma_desc_hw *hw; 1868 1869 /* Get a free segment */ 1870 segment = xilinx_axidma_alloc_tx_segment(chan); 1871 if (!segment) 1872 goto error; 1873 1874 /* 1875 * Calculate the maximum number of bytes to transfer, 1876 * making sure it is less than the hw limit 1877 */ 1878 copy = min_t(size_t, period_len - sg_used, 1879 XILINX_DMA_MAX_TRANS_LEN); 1880 hw = &segment->hw; 1881 xilinx_axidma_buf(chan, hw, buf_addr, sg_used, 1882 period_len * i); 1883 hw->control = copy; 1884 1885 if (prev) 1886 prev->hw.next_desc = segment->phys; 1887 1888 prev = segment; 1889 sg_used += copy; 1890 1891 /* 1892 * Insert the segment into the descriptor segments 1893 * list. 1894 */ 1895 list_add_tail(&segment->node, &desc->segments); 1896 } 1897 } 1898 1899 head_segment = list_first_entry(&desc->segments, 1900 struct xilinx_axidma_tx_segment, node); 1901 desc->async_tx.phys = head_segment->phys; 1902 1903 desc->cyclic = true; 1904 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1905 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 1906 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1907 1908 segment = list_last_entry(&desc->segments, 1909 struct xilinx_axidma_tx_segment, 1910 node); 1911 segment->hw.next_desc = (u32) head_segment->phys; 1912 1913 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1914 if (direction == DMA_MEM_TO_DEV) { 1915 head_segment->hw.control |= XILINX_DMA_BD_SOP; 1916 segment->hw.control |= XILINX_DMA_BD_EOP; 1917 } 1918 1919 return &desc->async_tx; 1920 1921 error: 1922 xilinx_dma_free_tx_descriptor(chan, desc); 1923 return NULL; 1924 } 1925 1926 /** 1927 * xilinx_dma_prep_interleaved - prepare a descriptor for a 1928 * DMA_SLAVE transaction 1929 * @dchan: DMA channel 1930 * @xt: Interleaved template pointer 1931 * @flags: transfer ack flags 1932 * 1933 * Return: Async transaction descriptor on success and NULL on failure 1934 */ 1935 static struct dma_async_tx_descriptor * 1936 xilinx_dma_prep_interleaved(struct dma_chan *dchan, 1937 struct dma_interleaved_template *xt, 1938 unsigned long flags) 1939 { 1940 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1941 struct xilinx_dma_tx_descriptor *desc; 1942 struct xilinx_axidma_tx_segment *segment; 1943 struct xilinx_axidma_desc_hw *hw; 1944 1945 if (!is_slave_direction(xt->dir)) 1946 return NULL; 1947 1948 if (!xt->numf || !xt->sgl[0].size) 1949 return NULL; 1950 1951 if (xt->frame_size != 1) 1952 return NULL; 1953 1954 /* Allocate a transaction descriptor. */ 1955 desc = xilinx_dma_alloc_tx_descriptor(chan); 1956 if (!desc) 1957 return NULL; 1958 1959 chan->direction = xt->dir; 1960 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1961 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1962 1963 /* Get a free segment */ 1964 segment = xilinx_axidma_alloc_tx_segment(chan); 1965 if (!segment) 1966 goto error; 1967 1968 hw = &segment->hw; 1969 1970 /* Fill in the descriptor */ 1971 if (xt->dir != DMA_MEM_TO_DEV) 1972 hw->buf_addr = xt->dst_start; 1973 else 1974 hw->buf_addr = xt->src_start; 1975 1976 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; 1977 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & 1978 XILINX_DMA_BD_VSIZE_MASK; 1979 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & 1980 XILINX_DMA_BD_STRIDE_MASK; 1981 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; 1982 1983 /* 1984 * Insert the segment into the descriptor segments 1985 * list. 1986 */ 1987 list_add_tail(&segment->node, &desc->segments); 1988 1989 1990 segment = list_first_entry(&desc->segments, 1991 struct xilinx_axidma_tx_segment, node); 1992 desc->async_tx.phys = segment->phys; 1993 1994 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1995 if (xt->dir == DMA_MEM_TO_DEV) { 1996 segment->hw.control |= XILINX_DMA_BD_SOP; 1997 segment = list_last_entry(&desc->segments, 1998 struct xilinx_axidma_tx_segment, 1999 node); 2000 segment->hw.control |= XILINX_DMA_BD_EOP; 2001 } 2002 2003 return &desc->async_tx; 2004 2005 error: 2006 xilinx_dma_free_tx_descriptor(chan, desc); 2007 return NULL; 2008 } 2009 2010 /** 2011 * xilinx_dma_terminate_all - Halt the channel and free descriptors 2012 * @chan: Driver specific DMA Channel pointer 2013 */ 2014 static int xilinx_dma_terminate_all(struct dma_chan *dchan) 2015 { 2016 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2017 u32 reg; 2018 int err; 2019 2020 if (chan->cyclic) 2021 xilinx_dma_chan_reset(chan); 2022 2023 err = chan->stop_transfer(chan); 2024 if (err) { 2025 dev_err(chan->dev, "Cannot stop channel %p: %x\n", 2026 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 2027 chan->err = true; 2028 } 2029 2030 /* Remove and free all of the descriptors in the lists */ 2031 xilinx_dma_free_descriptors(chan); 2032 2033 if (chan->cyclic) { 2034 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2035 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 2036 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 2037 chan->cyclic = false; 2038 } 2039 2040 return 0; 2041 } 2042 2043 /** 2044 * xilinx_dma_channel_set_config - Configure VDMA channel 2045 * Run-time configuration for Axi VDMA, supports: 2046 * . halt the channel 2047 * . configure interrupt coalescing and inter-packet delay threshold 2048 * . start/stop parking 2049 * . enable genlock 2050 * 2051 * @dchan: DMA channel 2052 * @cfg: VDMA device configuration pointer 2053 * 2054 * Return: '0' on success and failure value on error 2055 */ 2056 int xilinx_vdma_channel_set_config(struct dma_chan *dchan, 2057 struct xilinx_vdma_config *cfg) 2058 { 2059 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2060 u32 dmacr; 2061 2062 if (cfg->reset) 2063 return xilinx_dma_chan_reset(chan); 2064 2065 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2066 2067 chan->config.frm_dly = cfg->frm_dly; 2068 chan->config.park = cfg->park; 2069 2070 /* genlock settings */ 2071 chan->config.gen_lock = cfg->gen_lock; 2072 chan->config.master = cfg->master; 2073 2074 if (cfg->gen_lock && chan->genlock) { 2075 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; 2076 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; 2077 } 2078 2079 chan->config.frm_cnt_en = cfg->frm_cnt_en; 2080 if (cfg->park) 2081 chan->config.park_frm = cfg->park_frm; 2082 else 2083 chan->config.park_frm = -1; 2084 2085 chan->config.coalesc = cfg->coalesc; 2086 chan->config.delay = cfg->delay; 2087 2088 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { 2089 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; 2090 chan->config.coalesc = cfg->coalesc; 2091 } 2092 2093 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { 2094 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; 2095 chan->config.delay = cfg->delay; 2096 } 2097 2098 /* FSync Source selection */ 2099 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; 2100 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; 2101 2102 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); 2103 2104 return 0; 2105 } 2106 EXPORT_SYMBOL(xilinx_vdma_channel_set_config); 2107 2108 /* ----------------------------------------------------------------------------- 2109 * Probe and remove 2110 */ 2111 2112 /** 2113 * xilinx_dma_chan_remove - Per Channel remove function 2114 * @chan: Driver specific DMA channel 2115 */ 2116 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) 2117 { 2118 /* Disable all interrupts */ 2119 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 2120 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 2121 2122 if (chan->irq > 0) 2123 free_irq(chan->irq, chan); 2124 2125 tasklet_kill(&chan->tasklet); 2126 2127 list_del(&chan->common.device_node); 2128 } 2129 2130 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2131 struct clk **tx_clk, struct clk **rx_clk, 2132 struct clk **sg_clk, struct clk **tmp_clk) 2133 { 2134 int err; 2135 2136 *tmp_clk = NULL; 2137 2138 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2139 if (IS_ERR(*axi_clk)) { 2140 err = PTR_ERR(*axi_clk); 2141 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); 2142 return err; 2143 } 2144 2145 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2146 if (IS_ERR(*tx_clk)) 2147 *tx_clk = NULL; 2148 2149 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2150 if (IS_ERR(*rx_clk)) 2151 *rx_clk = NULL; 2152 2153 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); 2154 if (IS_ERR(*sg_clk)) 2155 *sg_clk = NULL; 2156 2157 err = clk_prepare_enable(*axi_clk); 2158 if (err) { 2159 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 2160 return err; 2161 } 2162 2163 err = clk_prepare_enable(*tx_clk); 2164 if (err) { 2165 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 2166 goto err_disable_axiclk; 2167 } 2168 2169 err = clk_prepare_enable(*rx_clk); 2170 if (err) { 2171 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 2172 goto err_disable_txclk; 2173 } 2174 2175 err = clk_prepare_enable(*sg_clk); 2176 if (err) { 2177 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); 2178 goto err_disable_rxclk; 2179 } 2180 2181 return 0; 2182 2183 err_disable_rxclk: 2184 clk_disable_unprepare(*rx_clk); 2185 err_disable_txclk: 2186 clk_disable_unprepare(*tx_clk); 2187 err_disable_axiclk: 2188 clk_disable_unprepare(*axi_clk); 2189 2190 return err; 2191 } 2192 2193 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2194 struct clk **dev_clk, struct clk **tmp_clk, 2195 struct clk **tmp1_clk, struct clk **tmp2_clk) 2196 { 2197 int err; 2198 2199 *tmp_clk = NULL; 2200 *tmp1_clk = NULL; 2201 *tmp2_clk = NULL; 2202 2203 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2204 if (IS_ERR(*axi_clk)) { 2205 err = PTR_ERR(*axi_clk); 2206 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); 2207 return err; 2208 } 2209 2210 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); 2211 if (IS_ERR(*dev_clk)) { 2212 err = PTR_ERR(*dev_clk); 2213 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); 2214 return err; 2215 } 2216 2217 err = clk_prepare_enable(*axi_clk); 2218 if (err) { 2219 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 2220 return err; 2221 } 2222 2223 err = clk_prepare_enable(*dev_clk); 2224 if (err) { 2225 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); 2226 goto err_disable_axiclk; 2227 } 2228 2229 return 0; 2230 2231 err_disable_axiclk: 2232 clk_disable_unprepare(*axi_clk); 2233 2234 return err; 2235 } 2236 2237 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2238 struct clk **tx_clk, struct clk **txs_clk, 2239 struct clk **rx_clk, struct clk **rxs_clk) 2240 { 2241 int err; 2242 2243 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2244 if (IS_ERR(*axi_clk)) { 2245 err = PTR_ERR(*axi_clk); 2246 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); 2247 return err; 2248 } 2249 2250 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2251 if (IS_ERR(*tx_clk)) 2252 *tx_clk = NULL; 2253 2254 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); 2255 if (IS_ERR(*txs_clk)) 2256 *txs_clk = NULL; 2257 2258 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2259 if (IS_ERR(*rx_clk)) 2260 *rx_clk = NULL; 2261 2262 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); 2263 if (IS_ERR(*rxs_clk)) 2264 *rxs_clk = NULL; 2265 2266 err = clk_prepare_enable(*axi_clk); 2267 if (err) { 2268 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 2269 return err; 2270 } 2271 2272 err = clk_prepare_enable(*tx_clk); 2273 if (err) { 2274 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 2275 goto err_disable_axiclk; 2276 } 2277 2278 err = clk_prepare_enable(*txs_clk); 2279 if (err) { 2280 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); 2281 goto err_disable_txclk; 2282 } 2283 2284 err = clk_prepare_enable(*rx_clk); 2285 if (err) { 2286 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 2287 goto err_disable_txsclk; 2288 } 2289 2290 err = clk_prepare_enable(*rxs_clk); 2291 if (err) { 2292 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); 2293 goto err_disable_rxclk; 2294 } 2295 2296 return 0; 2297 2298 err_disable_rxclk: 2299 clk_disable_unprepare(*rx_clk); 2300 err_disable_txsclk: 2301 clk_disable_unprepare(*txs_clk); 2302 err_disable_txclk: 2303 clk_disable_unprepare(*tx_clk); 2304 err_disable_axiclk: 2305 clk_disable_unprepare(*axi_clk); 2306 2307 return err; 2308 } 2309 2310 static void xdma_disable_allclks(struct xilinx_dma_device *xdev) 2311 { 2312 clk_disable_unprepare(xdev->rxs_clk); 2313 clk_disable_unprepare(xdev->rx_clk); 2314 clk_disable_unprepare(xdev->txs_clk); 2315 clk_disable_unprepare(xdev->tx_clk); 2316 clk_disable_unprepare(xdev->axi_clk); 2317 } 2318 2319 /** 2320 * xilinx_dma_chan_probe - Per Channel Probing 2321 * It get channel features from the device tree entry and 2322 * initialize special channel handling routines 2323 * 2324 * @xdev: Driver specific device structure 2325 * @node: Device node 2326 * 2327 * Return: '0' on success and failure value on error 2328 */ 2329 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, 2330 struct device_node *node, int chan_id) 2331 { 2332 struct xilinx_dma_chan *chan; 2333 bool has_dre = false; 2334 u32 value, width; 2335 int err; 2336 2337 /* Allocate and initialize the channel structure */ 2338 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); 2339 if (!chan) 2340 return -ENOMEM; 2341 2342 chan->dev = xdev->dev; 2343 chan->xdev = xdev; 2344 chan->has_sg = xdev->has_sg; 2345 chan->desc_pendingcount = 0x0; 2346 chan->ext_addr = xdev->ext_addr; 2347 2348 spin_lock_init(&chan->lock); 2349 INIT_LIST_HEAD(&chan->pending_list); 2350 INIT_LIST_HEAD(&chan->done_list); 2351 INIT_LIST_HEAD(&chan->active_list); 2352 2353 /* Retrieve the channel properties from the device tree */ 2354 has_dre = of_property_read_bool(node, "xlnx,include-dre"); 2355 2356 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); 2357 2358 err = of_property_read_u32(node, "xlnx,datawidth", &value); 2359 if (err) { 2360 dev_err(xdev->dev, "missing xlnx,datawidth property\n"); 2361 return err; 2362 } 2363 width = value >> 3; /* Convert bits to bytes */ 2364 2365 /* If data width is greater than 8 bytes, DRE is not in hw */ 2366 if (width > 8) 2367 has_dre = false; 2368 2369 if (!has_dre) 2370 xdev->common.copy_align = fls(width - 1); 2371 2372 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || 2373 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || 2374 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { 2375 chan->direction = DMA_MEM_TO_DEV; 2376 chan->id = chan_id; 2377 chan->tdest = chan_id; 2378 2379 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2380 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2381 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 2382 2383 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2384 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) 2385 chan->flush_on_fsync = true; 2386 } 2387 } else if (of_device_is_compatible(node, 2388 "xlnx,axi-vdma-s2mm-channel") || 2389 of_device_is_compatible(node, 2390 "xlnx,axi-dma-s2mm-channel")) { 2391 chan->direction = DMA_DEV_TO_MEM; 2392 chan->id = chan_id; 2393 chan->tdest = chan_id - xdev->nr_channels; 2394 2395 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2396 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2397 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 2398 2399 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2400 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) 2401 chan->flush_on_fsync = true; 2402 } 2403 } else { 2404 dev_err(xdev->dev, "Invalid channel compatible node\n"); 2405 return -EINVAL; 2406 } 2407 2408 /* Request the interrupt */ 2409 chan->irq = irq_of_parse_and_map(node, 0); 2410 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, 2411 "xilinx-dma-controller", chan); 2412 if (err) { 2413 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); 2414 return err; 2415 } 2416 2417 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2418 chan->start_transfer = xilinx_dma_start_transfer; 2419 chan->stop_transfer = xilinx_dma_stop_transfer; 2420 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 2421 chan->start_transfer = xilinx_cdma_start_transfer; 2422 chan->stop_transfer = xilinx_cdma_stop_transfer; 2423 } else { 2424 chan->start_transfer = xilinx_vdma_start_transfer; 2425 chan->stop_transfer = xilinx_dma_stop_transfer; 2426 } 2427 2428 /* Initialize the tasklet */ 2429 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, 2430 (unsigned long)chan); 2431 2432 /* 2433 * Initialize the DMA channel and add it to the DMA engine channels 2434 * list. 2435 */ 2436 chan->common.device = &xdev->common; 2437 2438 list_add_tail(&chan->common.device_node, &xdev->common.channels); 2439 xdev->chan[chan->id] = chan; 2440 2441 /* Reset the channel */ 2442 err = xilinx_dma_chan_reset(chan); 2443 if (err < 0) { 2444 dev_err(xdev->dev, "Reset channel failed\n"); 2445 return err; 2446 } 2447 2448 return 0; 2449 } 2450 2451 /** 2452 * xilinx_dma_child_probe - Per child node probe 2453 * It get number of dma-channels per child node from 2454 * device-tree and initializes all the channels. 2455 * 2456 * @xdev: Driver specific device structure 2457 * @node: Device node 2458 * 2459 * Return: 0 always. 2460 */ 2461 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 2462 struct device_node *node) { 2463 int ret, i, nr_channels = 1; 2464 2465 ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2466 if ((ret < 0) && xdev->mcdma) 2467 dev_warn(xdev->dev, "missing dma-channels property\n"); 2468 2469 for (i = 0; i < nr_channels; i++) 2470 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); 2471 2472 xdev->nr_channels += nr_channels; 2473 2474 return 0; 2475 } 2476 2477 /** 2478 * of_dma_xilinx_xlate - Translation function 2479 * @dma_spec: Pointer to DMA specifier as found in the device tree 2480 * @ofdma: Pointer to DMA controller data 2481 * 2482 * Return: DMA channel pointer on success and NULL on error 2483 */ 2484 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 2485 struct of_dma *ofdma) 2486 { 2487 struct xilinx_dma_device *xdev = ofdma->of_dma_data; 2488 int chan_id = dma_spec->args[0]; 2489 2490 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) 2491 return NULL; 2492 2493 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 2494 } 2495 2496 static const struct xilinx_dma_config axidma_config = { 2497 .dmatype = XDMA_TYPE_AXIDMA, 2498 .clk_init = axidma_clk_init, 2499 }; 2500 2501 static const struct xilinx_dma_config axicdma_config = { 2502 .dmatype = XDMA_TYPE_CDMA, 2503 .clk_init = axicdma_clk_init, 2504 }; 2505 2506 static const struct xilinx_dma_config axivdma_config = { 2507 .dmatype = XDMA_TYPE_VDMA, 2508 .clk_init = axivdma_clk_init, 2509 }; 2510 2511 static const struct of_device_id xilinx_dma_of_ids[] = { 2512 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 2513 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 2514 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 2515 {} 2516 }; 2517 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); 2518 2519 /** 2520 * xilinx_dma_probe - Driver probe function 2521 * @pdev: Pointer to the platform_device structure 2522 * 2523 * Return: '0' on success and failure value on error 2524 */ 2525 static int xilinx_dma_probe(struct platform_device *pdev) 2526 { 2527 int (*clk_init)(struct platform_device *, struct clk **, struct clk **, 2528 struct clk **, struct clk **, struct clk **) 2529 = axivdma_clk_init; 2530 struct device_node *node = pdev->dev.of_node; 2531 struct xilinx_dma_device *xdev; 2532 struct device_node *child, *np = pdev->dev.of_node; 2533 struct resource *io; 2534 u32 num_frames, addr_width; 2535 int i, err; 2536 2537 /* Allocate and initialize the DMA engine structure */ 2538 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 2539 if (!xdev) 2540 return -ENOMEM; 2541 2542 xdev->dev = &pdev->dev; 2543 if (np) { 2544 const struct of_device_id *match; 2545 2546 match = of_match_node(xilinx_dma_of_ids, np); 2547 if (match && match->data) { 2548 xdev->dma_config = match->data; 2549 clk_init = xdev->dma_config->clk_init; 2550 } 2551 } 2552 2553 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, 2554 &xdev->rx_clk, &xdev->rxs_clk); 2555 if (err) 2556 return err; 2557 2558 /* Request and map I/O memory */ 2559 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2560 xdev->regs = devm_ioremap_resource(&pdev->dev, io); 2561 if (IS_ERR(xdev->regs)) 2562 return PTR_ERR(xdev->regs); 2563 2564 /* Retrieve the DMA engine properties from the device tree */ 2565 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); 2566 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 2567 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); 2568 2569 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2570 err = of_property_read_u32(node, "xlnx,num-fstores", 2571 &num_frames); 2572 if (err < 0) { 2573 dev_err(xdev->dev, 2574 "missing xlnx,num-fstores property\n"); 2575 return err; 2576 } 2577 2578 err = of_property_read_u32(node, "xlnx,flush-fsync", 2579 &xdev->flush_on_fsync); 2580 if (err < 0) 2581 dev_warn(xdev->dev, 2582 "missing xlnx,flush-fsync property\n"); 2583 } 2584 2585 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); 2586 if (err < 0) 2587 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); 2588 2589 if (addr_width > 32) 2590 xdev->ext_addr = true; 2591 else 2592 xdev->ext_addr = false; 2593 2594 /* Set the dma mask bits */ 2595 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); 2596 2597 /* Initialize the DMA engine */ 2598 xdev->common.dev = &pdev->dev; 2599 2600 INIT_LIST_HEAD(&xdev->common.channels); 2601 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { 2602 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); 2603 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); 2604 } 2605 2606 xdev->common.device_alloc_chan_resources = 2607 xilinx_dma_alloc_chan_resources; 2608 xdev->common.device_free_chan_resources = 2609 xilinx_dma_free_chan_resources; 2610 xdev->common.device_terminate_all = xilinx_dma_terminate_all; 2611 xdev->common.device_tx_status = xilinx_dma_tx_status; 2612 xdev->common.device_issue_pending = xilinx_dma_issue_pending; 2613 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2614 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 2615 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 2616 xdev->common.device_prep_dma_cyclic = 2617 xilinx_dma_prep_dma_cyclic; 2618 xdev->common.device_prep_interleaved_dma = 2619 xilinx_dma_prep_interleaved; 2620 /* Residue calculation is supported by only AXI DMA */ 2621 xdev->common.residue_granularity = 2622 DMA_RESIDUE_GRANULARITY_SEGMENT; 2623 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 2624 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); 2625 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 2626 } else { 2627 xdev->common.device_prep_interleaved_dma = 2628 xilinx_vdma_dma_prep_interleaved; 2629 } 2630 2631 platform_set_drvdata(pdev, xdev); 2632 2633 /* Initialize the channels */ 2634 for_each_child_of_node(node, child) { 2635 err = xilinx_dma_child_probe(xdev, child); 2636 if (err < 0) 2637 goto disable_clks; 2638 } 2639 2640 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2641 for (i = 0; i < xdev->nr_channels; i++) 2642 if (xdev->chan[i]) 2643 xdev->chan[i]->num_frms = num_frames; 2644 } 2645 2646 /* Register the DMA engine with the core */ 2647 dma_async_device_register(&xdev->common); 2648 2649 err = of_dma_controller_register(node, of_dma_xilinx_xlate, 2650 xdev); 2651 if (err < 0) { 2652 dev_err(&pdev->dev, "Unable to register DMA to DT\n"); 2653 dma_async_device_unregister(&xdev->common); 2654 goto error; 2655 } 2656 2657 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 2658 2659 return 0; 2660 2661 disable_clks: 2662 xdma_disable_allclks(xdev); 2663 error: 2664 for (i = 0; i < xdev->nr_channels; i++) 2665 if (xdev->chan[i]) 2666 xilinx_dma_chan_remove(xdev->chan[i]); 2667 2668 return err; 2669 } 2670 2671 /** 2672 * xilinx_dma_remove - Driver remove function 2673 * @pdev: Pointer to the platform_device structure 2674 * 2675 * Return: Always '0' 2676 */ 2677 static int xilinx_dma_remove(struct platform_device *pdev) 2678 { 2679 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); 2680 int i; 2681 2682 of_dma_controller_free(pdev->dev.of_node); 2683 2684 dma_async_device_unregister(&xdev->common); 2685 2686 for (i = 0; i < xdev->nr_channels; i++) 2687 if (xdev->chan[i]) 2688 xilinx_dma_chan_remove(xdev->chan[i]); 2689 2690 xdma_disable_allclks(xdev); 2691 2692 return 0; 2693 } 2694 2695 static struct platform_driver xilinx_vdma_driver = { 2696 .driver = { 2697 .name = "xilinx-vdma", 2698 .of_match_table = xilinx_dma_of_ids, 2699 }, 2700 .probe = xilinx_dma_probe, 2701 .remove = xilinx_dma_remove, 2702 }; 2703 2704 module_platform_driver(xilinx_vdma_driver); 2705 2706 MODULE_AUTHOR("Xilinx, Inc."); 2707 MODULE_DESCRIPTION("Xilinx VDMA driver"); 2708 MODULE_LICENSE("GPL v2"); 2709