// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET	0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET	0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET	0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET	0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver may have to write to a register offset that is not
 * 64-bit aligned (e.g. 0x5c), the value is written as two separate 32-bit
 * writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
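
/*
 * Note: the VDMA and CDMA segment allocators above draw from the channel's
 * dma_pool, while the AXI DMA allocator below hands out segments from the
 * pre-allocated free_seg_list. That list is filled from a single coherent
 * allocation in xilinx_dma_alloc_chan_resources(), where the descriptors
 * are also pre-linked into a ring through next_desc, so allocating a
 * segment here is only a list operation under the channel lock.
 */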

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail descriptor
		 * register with a value that is not part of the BD chain,
		 * so allocate a descriptor segment here, during channel
		 * allocation, for programming the tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_cdma_tx_segment),
						  __alignof__(struct xilinx_cdma_tx_segment),
						  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_vdma_tx_segment),
						  __alignof__(struct xilinx_vdma_tx_segment),
						  0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel resets the other
		 * channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;
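
	/*
	 * For AXI DMA in SG mode, estimate the residue of the last active
	 * descriptor: for each segment hw->control holds the programmed
	 * length and hw->status the completed byte count, so the remaining
	 * bytes are their difference, masked to the transfer-length field.
	 */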
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   chan->xdev->max_buffer_len;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *segment, *last = NULL;
	int i = 0;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* If not parking, enable circular mode */
	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
	else
		reg |= XILINX_DMA_DMACR_CIRC_EN;
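
	/*
	 * When parking, the channel keeps processing the single frame buffer
	 * selected by the park pointer programmed below; in circular mode it
	 * cycles through all of the programmed frame buffers.
	 */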

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->desc_submitcount < chan->num_frms)
		i = chan->desc_submitcount;

	list_for_each_entry(segment, &desc->segments, node) {
		if (chan->ext_addr)
			vdma_desc_write_64(chan,
					   XILINX_VDMA_REG_START_ADDRESS_64(i++),
					   segment->hw.buf_addr,
					   segment->hw.buf_addr_msb);
		else
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

		last = segment;
	}

	if (!last)
		return;

	/* HW expects these parameters to be same for one transaction */
	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
			last->hw.stride);
	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

	chan->desc_submitcount++;
	chan->desc_pendingcount--;
	list_del(&desc->node);
	list_add_tail(&desc->node, &chan->active_list);
	if (chan->desc_submitcount == chan->num_frms)
		chan->desc_submitcount = 0;

	chan->idle = false;
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
			    XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
			     xilinx_prep_dma_addr_t(hw->src_addr));
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
			     xilinx_prep_dma_addr_t(hw->dest_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	if (!chan->idle)
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
		       XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					       XILINX_DMA_MCRX_CDESC(chan->tdest),
					       head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					       XILINX_DMA_MCRX_TDESC(chan->tdest),
					       tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}
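
/*
 * Typical usage flow (a rough client-side sketch; the "axidma0" channel
 * name and the my_done_cb callback are illustrative placeholders): a
 * consumer requests one of these channels through the dmaengine API,
 * prepares a descriptor with one of the device_prep_* callbacks in this
 * file, submits it (xilinx_dma_tx_submit() then queues it on pending_list)
 * and calls dma_async_issue_pending(), which ends up in
 * chan->start_transfer(). On completion the interrupt handler moves the
 * descriptor to done_list and the tasklet invokes the client callback.
 *
 *	chan = dma_request_chan(dev, "axidma0");
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	desc->callback = my_done_cb;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */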

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->idle = true;
	chan->desc_submitcount = 0;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
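
/*
 * Rough sketch of a video client driving the VDMA prep callback above via
 * the generic interleaved dmaengine API (frame_dma_addr, height, width,
 * bpp and stride are illustrative placeholders; the embedded-template
 * pattern mirrors what in-tree video clients use):
 *
 *	struct {
 *		struct dma_interleaved_template xt;
 *		struct data_chunk sgl[1];
 *	} t = { };
 *	struct xilinx_vdma_config cfg = { .park = 0, .coalesc = 1 };
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 *	t.xt.dir = DMA_DEV_TO_MEM;
 *	t.xt.dst_start = frame_dma_addr;
 *	t.xt.numf = height;
 *	t.xt.frame_size = 1;
 *	t.xt.sgl[0].size = width * bpp;
 *	t.xt.sgl[0].icg = stride - width * bpp;
 *	desc = dmaengine_prep_interleaved_dma(chan, &t.xt, DMA_PREP_INTERRUPT);
 *
 * followed by dmaengine_submit() and dma_async_issue_pending() as usual.
 */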

/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
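
/*
 * Rough sketch of a client using the CDMA memcpy callback above (dev,
 * src_buf, dst_buf and len are illustrative placeholders):
 *
 *	dma_addr_t src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
 *	dma_addr_t dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * Completion can then be waited for with dma_sync_wait() or a descriptor
 * callback. Note that len must not exceed the configured max_buffer_len,
 * otherwise the prep callback above returns NULL.
 */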

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA
 *	transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/*
	 * For a DMA_MEM_TO_DEV transfer, set SOP on the first segment
	 * and EOP on the last one.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
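/*
 * Illustrative client-side sketch (not part of this driver): cyclic
 * transfers are set up once and then run until terminated, with the
 * completion callback firing once per period. "chan", "buf_dma",
 * "period_len", "nr_periods", "period_done" and "ctx" are placeholders.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma,
 *				       nr_periods * period_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = period_done;
 *	tx->callback_param = ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * The ring keeps running until the client calls
 * dmaengine_terminate_sync(chan), which lands in xilinx_dma_terminate_all()
 * below.
 */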

/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for an interleaved
 *	DMA transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/*
	 * For a DMA_MEM_TO_DEV transfer, set SOP on the first segment
	 * and EOP on the last one.
	 */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
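/*
 * Illustrative client-side sketch (not part of this driver): because the
 * prep callback above only accepts single-chunk frames (xt->frame_size must
 * be 1), an interleaved template for it describes one 2D plane: numf lines
 * of sgl[0].size bytes with sgl[0].icg bytes of gap between lines. "chan",
 * "buf_dma", "width_bytes", "stride_bytes" and "height" are placeholders;
 * the driver copies the template fields during prep, so it may be freed
 * immediately afterwards.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_dma;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width_bytes;
 *	xt->sgl[0].icg = stride_bytes - width_bytes;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	kfree(xt);
 *	if (!tx)
 *		return -ENOMEM;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */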

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
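/*
 * Illustrative client-side sketch (not part of this driver): a video client
 * that owns a VDMA channel can tune it at run time with the exported helper
 * above, for example to get one interrupt per frame and to park on frame 0.
 * "chan" is a placeholder for a channel obtained through dma_request_chan().
 *
 *	struct xilinx_vdma_config cfg = { };
 *
 *	cfg.frm_cnt_en = 1;
 *	cfg.coalesc = 1;
 *	cfg.park = 1;
 *	cfg.park_frm = 0;
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */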

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes special channel handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This variable ensures that descriptors are not submitted while
	 * the DMA engine is in progress. It avoids polling a bit in the
	 * status register to learn the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* check if SG is enabled (only for AXIDMA and CDMA) */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
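/*
 * Illustrative binding sketch (not part of this driver): the translation
 * function above takes a single specifier cell, the channel index, so a
 * client node would typically reference the controller as
 *
 *	dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *	dma-names = "tx", "rx";
 *
 * and request the channels by name:
 *
 *	chan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 * The controller label "axi_dma_0" and the channel names are placeholders;
 * see the devicetree binding document for the authoritative description.
 */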

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
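/*
 * Illustrative devicetree sketch (not part of this driver): the properties
 * parsed in xilinx_dma_chan_probe() above and xilinx_dma_probe() below
 * roughly correspond to a node such as the following AXI DMA example. All
 * addresses, interrupt specifiers, clock phandles and the node name are
 * placeholders; the dt-bindings document is the authoritative reference.
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		#dma-cells = <1>;
 *		reg = <0x40400000 0x10000>;
 *		clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *		clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *			      "m_axi_s2mm_aclk", "m_axi_sg_aclk";
 *		xlnx,addrwidth = <0x20>;
 *
 *		dma-channel@40400000 {
 *			compatible = "xlnx,axi-dma-mm2s-channel";
 *			interrupts = <0 29 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *		dma-channel@40400030 {
 *			compatible = "xlnx,axi-dma-s2mm-channel";
 *			interrupts = <0 30 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *	};
 */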

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported only by AXI DMA */
		xdev->common.residue_granularity =
					DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");