// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 */
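/*
 * Illustrative usage sketch (not part of this driver): a client typically
 * reaches these channels through the generic dmaengine API. Assuming a
 * device tree binding that names an S2MM channel "rx", the flow is roughly:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_slave_single(chan, buf, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = rx_done;	(hypothetical completion handler)
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The channel name and callback above are assumptions for illustration only.
 */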
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET	0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET	0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET	0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET	0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr) \
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
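/*
 * For illustration, the token pasting above recombines the split LSB/MSB
 * descriptor fields: xilinx_prep_dma_addr_t(hw->src_addr) expands to
 * ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))).
 */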
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
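/*
 * Illustrative note on the accessor layering above: dma_read()/dma_write()
 * take offsets relative to the register base, while the dma_ctrl_*()
 * helpers add chan->ctrl_offset first. For a channel set up with
 * ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET (0x30), for example,
 * dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) reads the register at
 * regs + 0x30 + 0x04.
 */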
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits
 * instead of a single 64 bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free Memory that is allocated for cyclic DMA Mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_cdma_tx_segment),
						  __alignof__(struct xilinx_cdma_tx_segment),
						  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_vdma_tx_segment),
						  __alignof__(struct xilinx_vdma_tx_segment),
						  0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA resetting once channel will reset the
		 * other channel as well so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}
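/*
 * Worked example for xilinx_dma_calc_copysize() with hypothetical values:
 * size = 100, max_buffer_len = 64 and copy_align = 2 (4-byte alignment).
 * The first call (done = 0) yields min(100, 64) = 64, already a multiple
 * of 4; the second call (done = 64) yields the remaining 36 bytes and
 * skips the rounding because it is the last piece.
 */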
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   chan->xdev->max_buffer_len;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *segment, *last = NULL;
	int i = 0;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
	/* If not parking, enable circular mode */
	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
	else
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->desc_submitcount < chan->num_frms)
		i = chan->desc_submitcount;

	list_for_each_entry(segment, &desc->segments, node) {
		if (chan->ext_addr)
			vdma_desc_write_64(chan,
					   XILINX_VDMA_REG_START_ADDRESS_64(i++),
					   segment->hw.buf_addr,
					   segment->hw.buf_addr_msb);
		else
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

		last = segment;
	}

	if (!last)
		return;

	/* HW expects these parameters to be same for one transaction */
	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
			last->hw.stride);
	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

	chan->desc_submitcount++;
	chan->desc_pendingcount--;
	list_del(&desc->node);
	list_add_tail(&desc->node, &chan->active_list);
	if (chan->desc_submitcount == chan->num_frms)
		chan->desc_submitcount = 0;

	chan->idle = false;
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
			    XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;
		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
			     xilinx_prep_dma_addr_t(hw->src_addr));
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
			     xilinx_prep_dma_addr_t(hw->dest_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	if (!chan->idle)
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
		       XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					       XILINX_DMA_MCRX_CDESC(chan->tdest),
					       head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					       XILINX_DMA_MCRX_TDESC(chan->tdest),
					       tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan : xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->idle = true;
	chan->desc_submitcount = 0;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}
	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
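/*
 * Illustrative mapping for the interleaved template handled above, assuming
 * a 1920x1080 frame of 32-bit pixels with no inter-line gap: numf = 1080
 * becomes vsize, sgl[0].size = 1920 * 4 = 7680 bytes becomes hsize, and
 * with sgl[0].icg = 0 the stride between the first pixels of adjacent
 * lines is also 7680 bytes.
 */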
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;
	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
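/*
 * Note on the slave_sg preparation above: a scatterlist entry larger than
 * max_buffer_len is split across several buffer descriptors, and for
 * DMA_MEM_TO_DEV only the first descriptor carries XILINX_DMA_BD_SOP and
 * only the last carries XILINX_DMA_BD_EOP, so the whole chain forms one
 * packet on the stream side.
 */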

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/*
	 * For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on
	 * the last one.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
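
/*
 * Illustrative client-side sketch (not part of this driver): a cyclic
 * DEV_TO_MEM transfer (for example a capture ring buffer) built on the
 * prep_dma_cyclic callback above. The channel name, buffer handle, sizes
 * and completion callback are hypothetical.
 *
 *	static int example_cyclic_rx(struct device *dev, dma_addr_t buf,
 *				     size_t buf_len, size_t period_len,
 *				     dma_async_tx_callback period_done)
 *	{
 *		struct dma_chan *chan = dma_request_chan(dev, "rx");
 *		struct dma_async_tx_descriptor *tx;
 *
 *		if (IS_ERR(chan))
 *			return PTR_ERR(chan);
 *
 *		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					       DMA_DEV_TO_MEM,
 *					       DMA_PREP_INTERRUPT);
 *		if (!tx) {
 *			dma_release_channel(chan);
 *			return -EIO;
 *		}
 *
 *		tx->callback = period_done;	// invoked once per period
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */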

/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/*
	 * For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on
	 * the last one.
	 */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
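
/*
 * Illustrative client-side sketch (not part of this driver): how a consumer
 * typically reaches the terminate_all callback above when shutting down or
 * recovering from an error. The channel is assumed to have been obtained
 * with dma_request_chan().
 *
 *	static void example_channel_teardown(struct dma_chan *chan)
 *	{
 *		// Ends up in xilinx_dma_terminate_all(): the channel is
 *		// halted and all queued descriptors are freed.
 *		dmaengine_terminate_sync(chan);
 *		dma_release_channel(chan);
 *	}
 */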

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
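
/*
 * Illustrative client-side sketch (not part of this driver): a hypothetical
 * video client applying run-time VDMA settings through the exported helper
 * above. The field values are examples only; the fields themselves come from
 * struct xilinx_vdma_config in <linux/dma/xilinx_dma.h>.
 *
 *	#include <linux/dma/xilinx_dma.h>
 *
 *	static int example_configure_vdma(struct dma_chan *chan)
 *	{
 *		struct xilinx_vdma_config cfg = {
 *			.frm_dly = 0,
 *			.gen_lock = 0,
 *			.park = 0,
 *			.coalesc = 1,	// interrupt after every frame
 *			.delay = 0,
 *			.reset = 0,
 *			.ext_fsync = 1,
 *		};
 *
 *		return xilinx_vdma_channel_set_config(chan, &cfg);
 *	}
 */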

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
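
/*
 * Note on the pattern used by the *_clk_init() helpers above: every clock
 * except s_axi_lite_aclk is treated as optional by resetting the pointer to
 * NULL on lookup failure, which works because clk_prepare_enable(NULL) and
 * clk_disable_unprepare(NULL) are no-ops. A minimal sketch of the same idea
 * using devm_clk_get_optional(), assuming a kernel that provides it:
 *
 *	*tx_clk = devm_clk_get_optional(&pdev->dev, "m_axi_mm2s_aclk");
 *	if (IS_ERR(*tx_clk))
 *		return PTR_ERR(*tx_clk);	// real error, e.g. -EPROBE_DEFER
 *	// *tx_clk is NULL when the clock is simply not described in DT
 */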

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This variable ensures that descriptors are not submitted while the
	 * DMA engine is in progress. It is added to avoid polling for a bit
	 * in the status register to know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* check if SG is enabled (only for AXIDMA and CDMA) */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
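
/*
 * Worked example for the copy_align computation in xilinx_dma_chan_probe()
 * above (values are illustrative): without DRE the hardware needs buffers
 * aligned to the stream data width, and dmaengine expresses that alignment
 * as a power of two, hence copy_align = fls(width - 1):
 *
 *	xlnx,datawidth = 32 -> width = 4 bytes -> fls(3) = 2 -> 4-byte align
 *	xlnx,datawidth = 64 -> width = 8 bytes -> fls(7) = 3 -> 8-byte align
 */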

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * device-tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
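
/*
 * Illustrative consumer sketch (not part of this driver): a peripheral node
 * that references this controller with a one-cell specifier, for example
 * dmas = <&axi_dma_0 0>; dma-names = "rx"; in the device tree, resolves
 * through of_dma_xilinx_xlate() above when the client driver requests the
 * channel. Names below are hypothetical.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&client_pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// e.g. -EPROBE_DEFER until this driver probes
 */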

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");