1 /* 2 * DMA driver for Xilinx Video DMA Engine 3 * 4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. 5 * 6 * Based on the Freescale DMA driver. 7 * 8 * Description: 9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP 10 * core that provides high-bandwidth direct memory access between memory 11 * and AXI4-Stream type video target peripherals. The core provides efficient 12 * two dimensional DMA operations with independent asynchronous read (S2MM) 13 * and write (MM2S) channel operation. It can be configured to have either 14 * one channel or two channels. If configured as two channels, one is to 15 * transmit to the video device (MM2S) and another is to receive from the 16 * video device (S2MM). Initialization, status, interrupt and management 17 * registers are accessed through an AXI4-Lite slave interface. 18 * 19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that 20 * provides high-bandwidth one dimensional direct memory access between memory 21 * and AXI4-Stream target peripherals. It supports one receive and one 22 * transmit channel, both of them optional at synthesis time. 23 * 24 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory 25 * Access (DMA) between a memory-mapped source address and a memory-mapped 26 * destination address. 27 * 28 * This program is free software: you can redistribute it and/or modify 29 * it under the terms of the GNU General Public License as published by 30 * the Free Software Foundation, either version 2 of the License, or 31 * (at your option) any later version. 32 */ 33 34 #include <linux/bitops.h> 35 #include <linux/dmapool.h> 36 #include <linux/dma/xilinx_dma.h> 37 #include <linux/init.h> 38 #include <linux/interrupt.h> 39 #include <linux/io.h> 40 #include <linux/iopoll.h> 41 #include <linux/module.h> 42 #include <linux/of_address.h> 43 #include <linux/of_dma.h> 44 #include <linux/of_platform.h> 45 #include <linux/of_irq.h> 46 #include <linux/slab.h> 47 #include <linux/clk.h> 48 #include <linux/io-64-nonatomic-lo-hi.h> 49 50 #include "../dmaengine.h" 51 52 /* Register/Descriptor Offsets */ 53 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 54 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 55 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 56 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 57 58 /* Control Registers */ 59 #define XILINX_DMA_REG_DMACR 0x0000 60 #define XILINX_DMA_DMACR_DELAY_MAX 0xff 61 #define XILINX_DMA_DMACR_DELAY_SHIFT 24 62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff 63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 64 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14) 65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) 66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) 67 #define XILINX_DMA_DMACR_MASTER_SHIFT 8 68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 69 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) 70 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) 71 #define XILINX_DMA_DMACR_RESET BIT(2) 72 #define XILINX_DMA_DMACR_CIRC_EN BIT(1) 73 #define XILINX_DMA_DMACR_RUNSTOP BIT(0) 74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) 75 76 #define XILINX_DMA_REG_DMASR 0x0004 77 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) 78 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14) 79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) 80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) 81 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) 82 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) 83 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) 84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) 85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR 
BIT(7) 86 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) 87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) 88 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) 89 #define XILINX_DMA_DMASR_SG_MASK BIT(3) 90 #define XILINX_DMA_DMASR_IDLE BIT(1) 91 #define XILINX_DMA_DMASR_HALTED BIT(0) 92 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) 93 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) 94 95 #define XILINX_DMA_REG_CURDESC 0x0008 96 #define XILINX_DMA_REG_TAILDESC 0x0010 97 #define XILINX_DMA_REG_REG_INDEX 0x0014 98 #define XILINX_DMA_REG_FRMSTORE 0x0018 99 #define XILINX_DMA_REG_THRESHOLD 0x001c 100 #define XILINX_DMA_REG_FRMPTR_STS 0x0024 101 #define XILINX_DMA_REG_PARK_PTR 0x0028 102 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 103 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8) 104 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 105 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0) 106 #define XILINX_DMA_REG_VDMA_VERSION 0x002c 107 108 /* Register Direct Mode Registers */ 109 #define XILINX_DMA_REG_VSIZE 0x0000 110 #define XILINX_DMA_REG_HSIZE 0x0004 111 112 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 113 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 114 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 115 116 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 117 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 118 119 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec 120 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) 121 122 /* HW specific definitions */ 123 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 124 125 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 126 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ 127 XILINX_DMA_DMASR_DLY_CNT_IRQ | \ 128 XILINX_DMA_DMASR_ERR_IRQ) 129 130 #define XILINX_DMA_DMASR_ALL_ERR_MASK \ 131 (XILINX_DMA_DMASR_EOL_LATE_ERR | \ 132 XILINX_DMA_DMASR_SOF_LATE_ERR | \ 133 XILINX_DMA_DMASR_SG_DEC_ERR | \ 134 XILINX_DMA_DMASR_SG_SLV_ERR | \ 135 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 136 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 137 XILINX_DMA_DMASR_DMA_DEC_ERR | \ 138 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ 139 XILINX_DMA_DMASR_DMA_INT_ERR) 140 141 /* 142 * Recoverable errors are DMA Internal error, SOF Early, EOF Early 143 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC 144 * is enabled in the h/w system. 
145 */ 146 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ 147 (XILINX_DMA_DMASR_SOF_LATE_ERR | \ 148 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 149 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 150 XILINX_DMA_DMASR_DMA_INT_ERR) 151 152 /* Axi VDMA Flush on Fsync bits */ 153 #define XILINX_DMA_FLUSH_S2MM 3 154 #define XILINX_DMA_FLUSH_MM2S 2 155 #define XILINX_DMA_FLUSH_BOTH 1 156 157 /* Delay loop counter to prevent hardware failure */ 158 #define XILINX_DMA_LOOP_COUNT 1000000 159 160 /* AXI DMA Specific Registers/Offsets */ 161 #define XILINX_DMA_REG_SRCDSTADDR 0x18 162 #define XILINX_DMA_REG_BTT 0x28 163 164 /* AXI DMA Specific Masks/Bit fields */ 165 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8 166 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23 167 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 168 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) 169 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) 170 #define XILINX_DMA_CR_COALESCE_SHIFT 16 171 #define XILINX_DMA_BD_SOP BIT(27) 172 #define XILINX_DMA_BD_EOP BIT(26) 173 #define XILINX_DMA_COALESCE_MAX 255 174 #define XILINX_DMA_NUM_DESCS 255 175 #define XILINX_DMA_NUM_APP_WORDS 5 176 177 /* Multi-Channel DMA Descriptor offsets*/ 178 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) 179 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) 180 181 /* Multi-Channel DMA Masks/Shifts */ 182 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) 183 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) 184 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) 185 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) 186 #define XILINX_DMA_BD_STRIDE_SHIFT 0 187 #define XILINX_DMA_BD_VSIZE_SHIFT 19 188 189 /* AXI CDMA Specific Registers/Offsets */ 190 #define XILINX_CDMA_REG_SRCADDR 0x18 191 #define XILINX_CDMA_REG_DSTADDR 0x20 192 193 /* AXI CDMA Specific Masks */ 194 #define XILINX_CDMA_CR_SGMODE BIT(3) 195 196 #define xilinx_prep_dma_addr_t(addr) \ 197 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) 198 /** 199 * struct xilinx_vdma_desc_hw - Hardware Descriptor 200 * @next_desc: Next Descriptor Pointer @0x00 201 * @pad1: Reserved @0x04 202 * @buf_addr: Buffer address @0x08 203 * @buf_addr_msb: MSB of Buffer address @0x0C 204 * @vsize: Vertical Size @0x10 205 * @hsize: Horizontal Size @0x14 206 * @stride: Number of bytes between the first 207 * pixels of each horizontal line @0x18 208 */ 209 struct xilinx_vdma_desc_hw { 210 u32 next_desc; 211 u32 pad1; 212 u32 buf_addr; 213 u32 buf_addr_msb; 214 u32 vsize; 215 u32 hsize; 216 u32 stride; 217 } __aligned(64); 218 219 /** 220 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA 221 * @next_desc: Next Descriptor Pointer @0x00 222 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 223 * @buf_addr: Buffer address @0x08 224 * @buf_addr_msb: MSB of Buffer address @0x0C 225 * @mcdma_control: Control field for mcdma @0x10 226 * @vsize_stride: Vsize and Stride field for mcdma @0x14 227 * @control: Control field @0x18 228 * @status: Status field @0x1C 229 * @app: APP Fields @0x20 - 0x30 230 */ 231 struct xilinx_axidma_desc_hw { 232 u32 next_desc; 233 u32 next_desc_msb; 234 u32 buf_addr; 235 u32 buf_addr_msb; 236 u32 mcdma_control; 237 u32 vsize_stride; 238 u32 control; 239 u32 status; 240 u32 app[XILINX_DMA_NUM_APP_WORDS]; 241 } __aligned(64); 242 243 /** 244 * struct xilinx_cdma_desc_hw - Hardware Descriptor 245 * @next_desc: Next Descriptor Pointer @0x00 246 * @next_desc_msb: Next Descriptor Pointer MSB @0x04 247 * @src_addr: Source address @0x08 248 * @src_addr_msb: Source address MSB @0x0C 249 * @dest_addr: Destination address @0x10 250 * 
@dest_addr_msb: Destination address MSB @0x14 251 * @control: Control field @0x18 252 * @status: Status field @0x1C 253 */ 254 struct xilinx_cdma_desc_hw { 255 u32 next_desc; 256 u32 next_desc_msb; 257 u32 src_addr; 258 u32 src_addr_msb; 259 u32 dest_addr; 260 u32 dest_addr_msb; 261 u32 control; 262 u32 status; 263 } __aligned(64); 264 265 /** 266 * struct xilinx_vdma_tx_segment - Descriptor segment 267 * @hw: Hardware descriptor 268 * @node: Node in the descriptor segments list 269 * @phys: Physical address of segment 270 */ 271 struct xilinx_vdma_tx_segment { 272 struct xilinx_vdma_desc_hw hw; 273 struct list_head node; 274 dma_addr_t phys; 275 } __aligned(64); 276 277 /** 278 * struct xilinx_axidma_tx_segment - Descriptor segment 279 * @hw: Hardware descriptor 280 * @node: Node in the descriptor segments list 281 * @phys: Physical address of segment 282 */ 283 struct xilinx_axidma_tx_segment { 284 struct xilinx_axidma_desc_hw hw; 285 struct list_head node; 286 dma_addr_t phys; 287 } __aligned(64); 288 289 /** 290 * struct xilinx_cdma_tx_segment - Descriptor segment 291 * @hw: Hardware descriptor 292 * @node: Node in the descriptor segments list 293 * @phys: Physical address of segment 294 */ 295 struct xilinx_cdma_tx_segment { 296 struct xilinx_cdma_desc_hw hw; 297 struct list_head node; 298 dma_addr_t phys; 299 } __aligned(64); 300 301 /** 302 * struct xilinx_dma_tx_descriptor - Per Transaction structure 303 * @async_tx: Async transaction descriptor 304 * @segments: TX segments list 305 * @node: Node in the channel descriptors list 306 * @cyclic: Check for cyclic transfers. 307 */ 308 struct xilinx_dma_tx_descriptor { 309 struct dma_async_tx_descriptor async_tx; 310 struct list_head segments; 311 struct list_head node; 312 bool cyclic; 313 }; 314 315 /** 316 * struct xilinx_dma_chan - Driver specific DMA channel structure 317 * @xdev: Driver specific device structure 318 * @ctrl_offset: Control registers offset 319 * @desc_offset: TX descriptor registers offset 320 * @lock: Descriptor operation lock 321 * @pending_list: Descriptors waiting 322 * @active_list: Descriptors ready to submit 323 * @done_list: Complete descriptors 324 * @free_seg_list: Free descriptors 325 * @common: DMA common channel 326 * @desc_pool: Descriptors pool 327 * @dev: The dma device 328 * @irq: Channel IRQ 329 * @id: Channel ID 330 * @direction: Transfer direction 331 * @num_frms: Number of frames 332 * @has_sg: Support scatter transfers 333 * @cyclic: Check for cyclic transfers. 
334 * @genlock: Support genlock mode 335 * @err: Channel has errors 336 * @idle: Check for channel idle 337 * @tasklet: Cleanup work after irq 338 * @config: Device configuration info 339 * @flush_on_fsync: Flush on Frame sync 340 * @desc_pendingcount: Descriptor pending count 341 * @ext_addr: Indicates 64 bit addressing is supported by dma channel 342 * @desc_submitcount: Descriptor h/w submitted count 343 * @residue: Residue for AXI DMA 344 * @seg_v: Statically allocated segments base 345 * @seg_p: Physical allocated segments base 346 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 347 * @cyclic_seg_p: Physical allocated segments base for cyclic dma 348 * @start_transfer: Differentiate b/w DMA IP's transfer 349 * @stop_transfer: Differentiate b/w DMA IP's quiesce 350 * @tdest: TDEST value for mcdma 351 * @has_vflip: S2MM vertical flip 352 */ 353 struct xilinx_dma_chan { 354 struct xilinx_dma_device *xdev; 355 u32 ctrl_offset; 356 u32 desc_offset; 357 spinlock_t lock; 358 struct list_head pending_list; 359 struct list_head active_list; 360 struct list_head done_list; 361 struct list_head free_seg_list; 362 struct dma_chan common; 363 struct dma_pool *desc_pool; 364 struct device *dev; 365 int irq; 366 int id; 367 enum dma_transfer_direction direction; 368 int num_frms; 369 bool has_sg; 370 bool cyclic; 371 bool genlock; 372 bool err; 373 bool idle; 374 struct tasklet_struct tasklet; 375 struct xilinx_vdma_config config; 376 bool flush_on_fsync; 377 u32 desc_pendingcount; 378 bool ext_addr; 379 u32 desc_submitcount; 380 u32 residue; 381 struct xilinx_axidma_tx_segment *seg_v; 382 dma_addr_t seg_p; 383 struct xilinx_axidma_tx_segment *cyclic_seg_v; 384 dma_addr_t cyclic_seg_p; 385 void (*start_transfer)(struct xilinx_dma_chan *chan); 386 int (*stop_transfer)(struct xilinx_dma_chan *chan); 387 u16 tdest; 388 bool has_vflip; 389 }; 390 391 /** 392 * enum xdma_ip_type - DMA IP type. 393 * 394 * @XDMA_TYPE_AXIDMA: Axi dma ip. 395 * @XDMA_TYPE_CDMA: Axi cdma ip. 396 * @XDMA_TYPE_VDMA: Axi vdma ip. 
397 * 398 */ 399 enum xdma_ip_type { 400 XDMA_TYPE_AXIDMA = 0, 401 XDMA_TYPE_CDMA, 402 XDMA_TYPE_VDMA, 403 }; 404 405 struct xilinx_dma_config { 406 enum xdma_ip_type dmatype; 407 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, 408 struct clk **tx_clk, struct clk **txs_clk, 409 struct clk **rx_clk, struct clk **rxs_clk); 410 }; 411 412 /** 413 * struct xilinx_dma_device - DMA device structure 414 * @regs: I/O mapped base address 415 * @dev: Device Structure 416 * @common: DMA device structure 417 * @chan: Driver specific DMA channel 418 * @mcdma: Specifies whether Multi-Channel is present or not 419 * @flush_on_fsync: Flush on frame sync 420 * @ext_addr: Indicates 64 bit addressing is supported by dma device 421 * @pdev: Platform device structure pointer 422 * @dma_config: DMA config structure 423 * @axi_clk: DMA Axi4-lite interace clock 424 * @tx_clk: DMA mm2s clock 425 * @txs_clk: DMA mm2s stream clock 426 * @rx_clk: DMA s2mm clock 427 * @rxs_clk: DMA s2mm stream clock 428 * @nr_channels: Number of channels DMA device supports 429 * @chan_id: DMA channel identifier 430 * @max_buffer_len: Max buffer length 431 */ 432 struct xilinx_dma_device { 433 void __iomem *regs; 434 struct device *dev; 435 struct dma_device common; 436 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; 437 bool mcdma; 438 u32 flush_on_fsync; 439 bool ext_addr; 440 struct platform_device *pdev; 441 const struct xilinx_dma_config *dma_config; 442 struct clk *axi_clk; 443 struct clk *tx_clk; 444 struct clk *txs_clk; 445 struct clk *rx_clk; 446 struct clk *rxs_clk; 447 u32 nr_channels; 448 u32 chan_id; 449 u32 max_buffer_len; 450 }; 451 452 /* Macros */ 453 #define to_xilinx_chan(chan) \ 454 container_of(chan, struct xilinx_dma_chan, common) 455 #define to_dma_tx_descriptor(tx) \ 456 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) 457 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ 458 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ 459 cond, delay_us, timeout_us) 460 461 /* IO accessors */ 462 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) 463 { 464 return ioread32(chan->xdev->regs + reg); 465 } 466 467 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) 468 { 469 iowrite32(value, chan->xdev->regs + reg); 470 } 471 472 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, 473 u32 value) 474 { 475 dma_write(chan, chan->desc_offset + reg, value); 476 } 477 478 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) 479 { 480 return dma_read(chan, chan->ctrl_offset + reg); 481 } 482 483 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, 484 u32 value) 485 { 486 dma_write(chan, chan->ctrl_offset + reg, value); 487 } 488 489 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, 490 u32 clr) 491 { 492 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); 493 } 494 495 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, 496 u32 set) 497 { 498 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); 499 } 500 501 /** 502 * vdma_desc_write_64 - 64-bit descriptor write 503 * @chan: Driver specific VDMA channel 504 * @reg: Register to write 505 * @value_lsb: lower address of the descriptor. 506 * @value_msb: upper address of the descriptor. 
507 * 508 * Since vdma driver is trying to write to a register offset which is not a 509 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits 510 * instead of a single 64 bit register write. 511 */ 512 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, 513 u32 value_lsb, u32 value_msb) 514 { 515 /* Write the lsb 32 bits*/ 516 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); 517 518 /* Write the msb 32 bits */ 519 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); 520 } 521 522 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) 523 { 524 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); 525 } 526 527 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, 528 dma_addr_t addr) 529 { 530 if (chan->ext_addr) 531 dma_writeq(chan, reg, addr); 532 else 533 dma_ctrl_write(chan, reg, addr); 534 } 535 536 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, 537 struct xilinx_axidma_desc_hw *hw, 538 dma_addr_t buf_addr, size_t sg_used, 539 size_t period_len) 540 { 541 if (chan->ext_addr) { 542 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); 543 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + 544 period_len); 545 } else { 546 hw->buf_addr = buf_addr + sg_used + period_len; 547 } 548 } 549 550 /* ----------------------------------------------------------------------------- 551 * Descriptors and segments alloc and free 552 */ 553 554 /** 555 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment 556 * @chan: Driver specific DMA channel 557 * 558 * Return: The allocated segment on success and NULL on failure. 559 */ 560 static struct xilinx_vdma_tx_segment * 561 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 562 { 563 struct xilinx_vdma_tx_segment *segment; 564 dma_addr_t phys; 565 566 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 567 if (!segment) 568 return NULL; 569 570 segment->phys = phys; 571 572 return segment; 573 } 574 575 /** 576 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment 577 * @chan: Driver specific DMA channel 578 * 579 * Return: The allocated segment on success and NULL on failure. 580 */ 581 static struct xilinx_cdma_tx_segment * 582 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 583 { 584 struct xilinx_cdma_tx_segment *segment; 585 dma_addr_t phys; 586 587 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 588 if (!segment) 589 return NULL; 590 591 segment->phys = phys; 592 593 return segment; 594 } 595 596 /** 597 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment 598 * @chan: Driver specific DMA channel 599 * 600 * Return: The allocated segment on success and NULL on failure. 
601 */ 602 static struct xilinx_axidma_tx_segment * 603 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) 604 { 605 struct xilinx_axidma_tx_segment *segment = NULL; 606 unsigned long flags; 607 608 spin_lock_irqsave(&chan->lock, flags); 609 if (!list_empty(&chan->free_seg_list)) { 610 segment = list_first_entry(&chan->free_seg_list, 611 struct xilinx_axidma_tx_segment, 612 node); 613 list_del(&segment->node); 614 } 615 spin_unlock_irqrestore(&chan->lock, flags); 616 617 return segment; 618 } 619 620 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) 621 { 622 u32 next_desc = hw->next_desc; 623 u32 next_desc_msb = hw->next_desc_msb; 624 625 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw)); 626 627 hw->next_desc = next_desc; 628 hw->next_desc_msb = next_desc_msb; 629 } 630 631 /** 632 * xilinx_dma_free_tx_segment - Free transaction segment 633 * @chan: Driver specific DMA channel 634 * @segment: DMA transaction segment 635 */ 636 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, 637 struct xilinx_axidma_tx_segment *segment) 638 { 639 xilinx_dma_clean_hw_desc(&segment->hw); 640 641 list_add_tail(&segment->node, &chan->free_seg_list); 642 } 643 644 /** 645 * xilinx_cdma_free_tx_segment - Free transaction segment 646 * @chan: Driver specific DMA channel 647 * @segment: DMA transaction segment 648 */ 649 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, 650 struct xilinx_cdma_tx_segment *segment) 651 { 652 dma_pool_free(chan->desc_pool, segment, segment->phys); 653 } 654 655 /** 656 * xilinx_vdma_free_tx_segment - Free transaction segment 657 * @chan: Driver specific DMA channel 658 * @segment: DMA transaction segment 659 */ 660 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, 661 struct xilinx_vdma_tx_segment *segment) 662 { 663 dma_pool_free(chan->desc_pool, segment, segment->phys); 664 } 665 666 /** 667 * xilinx_dma_tx_descriptor - Allocate transaction descriptor 668 * @chan: Driver specific DMA channel 669 * 670 * Return: The allocated descriptor on success and NULL on failure. 
671 */ 672 static struct xilinx_dma_tx_descriptor * 673 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) 674 { 675 struct xilinx_dma_tx_descriptor *desc; 676 677 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 678 if (!desc) 679 return NULL; 680 681 INIT_LIST_HEAD(&desc->segments); 682 683 return desc; 684 } 685 686 /** 687 * xilinx_dma_free_tx_descriptor - Free transaction descriptor 688 * @chan: Driver specific DMA channel 689 * @desc: DMA transaction descriptor 690 */ 691 static void 692 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, 693 struct xilinx_dma_tx_descriptor *desc) 694 { 695 struct xilinx_vdma_tx_segment *segment, *next; 696 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 697 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 698 699 if (!desc) 700 return; 701 702 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 703 list_for_each_entry_safe(segment, next, &desc->segments, node) { 704 list_del(&segment->node); 705 xilinx_vdma_free_tx_segment(chan, segment); 706 } 707 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 708 list_for_each_entry_safe(cdma_segment, cdma_next, 709 &desc->segments, node) { 710 list_del(&cdma_segment->node); 711 xilinx_cdma_free_tx_segment(chan, cdma_segment); 712 } 713 } else { 714 list_for_each_entry_safe(axidma_segment, axidma_next, 715 &desc->segments, node) { 716 list_del(&axidma_segment->node); 717 xilinx_dma_free_tx_segment(chan, axidma_segment); 718 } 719 } 720 721 kfree(desc); 722 } 723 724 /* Required functions */ 725 726 /** 727 * xilinx_dma_free_desc_list - Free descriptors list 728 * @chan: Driver specific DMA channel 729 * @list: List to parse and delete the descriptor 730 */ 731 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, 732 struct list_head *list) 733 { 734 struct xilinx_dma_tx_descriptor *desc, *next; 735 736 list_for_each_entry_safe(desc, next, list, node) { 737 list_del(&desc->node); 738 xilinx_dma_free_tx_descriptor(chan, desc); 739 } 740 } 741 742 /** 743 * xilinx_dma_free_descriptors - Free channel descriptors 744 * @chan: Driver specific DMA channel 745 */ 746 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) 747 { 748 unsigned long flags; 749 750 spin_lock_irqsave(&chan->lock, flags); 751 752 xilinx_dma_free_desc_list(chan, &chan->pending_list); 753 xilinx_dma_free_desc_list(chan, &chan->done_list); 754 xilinx_dma_free_desc_list(chan, &chan->active_list); 755 756 spin_unlock_irqrestore(&chan->lock, flags); 757 } 758 759 /** 760 * xilinx_dma_free_chan_resources - Free channel resources 761 * @dchan: DMA channel 762 */ 763 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 764 { 765 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 766 unsigned long flags; 767 768 dev_dbg(chan->dev, "Free all channel resources.\n"); 769 770 xilinx_dma_free_descriptors(chan); 771 772 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 773 spin_lock_irqsave(&chan->lock, flags); 774 INIT_LIST_HEAD(&chan->free_seg_list); 775 spin_unlock_irqrestore(&chan->lock, flags); 776 777 /* Free memory that is allocated for BD */ 778 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * 779 XILINX_DMA_NUM_DESCS, chan->seg_v, 780 chan->seg_p); 781 782 /* Free Memory that is allocated for cyclic DMA Mode */ 783 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v), 784 chan->cyclic_seg_v, chan->cyclic_seg_p); 785 } 786 787 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { 788 dma_pool_destroy(chan->desc_pool); 789 
chan->desc_pool = NULL; 790 } 791 } 792 793 /** 794 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback 795 * @chan: Driver specific dma channel 796 * @desc: dma transaction descriptor 797 * @flags: flags for spin lock 798 */ 799 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, 800 struct xilinx_dma_tx_descriptor *desc, 801 unsigned long *flags) 802 { 803 dma_async_tx_callback callback; 804 void *callback_param; 805 806 callback = desc->async_tx.callback; 807 callback_param = desc->async_tx.callback_param; 808 if (callback) { 809 spin_unlock_irqrestore(&chan->lock, *flags); 810 callback(callback_param); 811 spin_lock_irqsave(&chan->lock, *flags); 812 } 813 } 814 815 /** 816 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 817 * @chan: Driver specific DMA channel 818 */ 819 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) 820 { 821 struct xilinx_dma_tx_descriptor *desc, *next; 822 unsigned long flags; 823 824 spin_lock_irqsave(&chan->lock, flags); 825 826 list_for_each_entry_safe(desc, next, &chan->done_list, node) { 827 struct dmaengine_desc_callback cb; 828 829 if (desc->cyclic) { 830 xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 831 break; 832 } 833 834 /* Remove from the list of running transactions */ 835 list_del(&desc->node); 836 837 /* Run the link descriptor callback function */ 838 dmaengine_desc_get_callback(&desc->async_tx, &cb); 839 if (dmaengine_desc_callback_valid(&cb)) { 840 spin_unlock_irqrestore(&chan->lock, flags); 841 dmaengine_desc_callback_invoke(&cb, NULL); 842 spin_lock_irqsave(&chan->lock, flags); 843 } 844 845 /* Run any dependencies, then free the descriptor */ 846 dma_run_dependencies(&desc->async_tx); 847 xilinx_dma_free_tx_descriptor(chan, desc); 848 } 849 850 spin_unlock_irqrestore(&chan->lock, flags); 851 } 852 853 /** 854 * xilinx_dma_do_tasklet - Schedule completion tasklet 855 * @data: Pointer to the Xilinx DMA channel structure 856 */ 857 static void xilinx_dma_do_tasklet(unsigned long data) 858 { 859 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; 860 861 xilinx_dma_chan_desc_cleanup(chan); 862 } 863 864 /** 865 * xilinx_dma_alloc_chan_resources - Allocate channel resources 866 * @dchan: DMA channel 867 * 868 * Return: '0' on success and failure value on error 869 */ 870 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 871 { 872 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 873 int i; 874 875 /* Has this channel already been allocated? */ 876 if (chan->desc_pool) 877 return 0; 878 879 /* 880 * We need the descriptor to be aligned to 64bytes 881 * for meeting Xilinx VDMA specification requirement. 882 */ 883 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 884 /* Allocate the buffer descriptors. */ 885 chan->seg_v = dma_alloc_coherent(chan->dev, 886 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, 887 &chan->seg_p, GFP_KERNEL); 888 if (!chan->seg_v) { 889 dev_err(chan->dev, 890 "unable to allocate channel %d descriptors\n", 891 chan->id); 892 return -ENOMEM; 893 } 894 /* 895 * For cyclic DMA mode we need to program the tail Descriptor 896 * register with a value which is not a part of the BD chain 897 * so allocating a desc segment during channel allocation for 898 * programming tail descriptor. 
899 */ 900 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, 901 sizeof(*chan->cyclic_seg_v), 902 &chan->cyclic_seg_p, 903 GFP_KERNEL); 904 if (!chan->cyclic_seg_v) { 905 dev_err(chan->dev, 906 "unable to allocate desc segment for cyclic DMA\n"); 907 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * 908 XILINX_DMA_NUM_DESCS, chan->seg_v, 909 chan->seg_p); 910 return -ENOMEM; 911 } 912 chan->cyclic_seg_v->phys = chan->cyclic_seg_p; 913 914 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { 915 chan->seg_v[i].hw.next_desc = 916 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * 917 ((i + 1) % XILINX_DMA_NUM_DESCS)); 918 chan->seg_v[i].hw.next_desc_msb = 919 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * 920 ((i + 1) % XILINX_DMA_NUM_DESCS)); 921 chan->seg_v[i].phys = chan->seg_p + 922 sizeof(*chan->seg_v) * i; 923 list_add_tail(&chan->seg_v[i].node, 924 &chan->free_seg_list); 925 } 926 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 927 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 928 chan->dev, 929 sizeof(struct xilinx_cdma_tx_segment), 930 __alignof__(struct xilinx_cdma_tx_segment), 931 0); 932 } else { 933 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 934 chan->dev, 935 sizeof(struct xilinx_vdma_tx_segment), 936 __alignof__(struct xilinx_vdma_tx_segment), 937 0); 938 } 939 940 if (!chan->desc_pool && 941 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { 942 dev_err(chan->dev, 943 "unable to allocate channel %d descriptor pool\n", 944 chan->id); 945 return -ENOMEM; 946 } 947 948 dma_cookie_init(dchan); 949 950 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 951 /* For AXI DMA resetting once channel will reset the 952 * other channel as well so enable the interrupts here. 953 */ 954 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 955 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 956 } 957 958 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 959 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 960 XILINX_CDMA_CR_SGMODE); 961 962 return 0; 963 } 964 965 /** 966 * xilinx_dma_calc_copysize - Calculate the amount of data to copy 967 * @chan: Driver specific DMA channel 968 * @size: Total data that needs to be copied 969 * @done: Amount of data that has been already copied 970 * 971 * Return: Amount of data that has to be copied 972 */ 973 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan, 974 int size, int done) 975 { 976 size_t copy; 977 978 copy = min_t(size_t, size - done, 979 chan->xdev->max_buffer_len); 980 981 if ((copy + done < size) && 982 chan->xdev->common.copy_align) { 983 /* 984 * If this is not the last descriptor, make sure 985 * the next one will be properly aligned 986 */ 987 copy = rounddown(copy, 988 (1 << chan->xdev->common.copy_align)); 989 } 990 return copy; 991 } 992 993 /** 994 * xilinx_dma_tx_status - Get DMA transaction status 995 * @dchan: DMA channel 996 * @cookie: Transaction identifier 997 * @txstate: Transaction state 998 * 999 * Return: DMA transaction status 1000 */ 1001 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, 1002 dma_cookie_t cookie, 1003 struct dma_tx_state *txstate) 1004 { 1005 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1006 struct xilinx_dma_tx_descriptor *desc; 1007 struct xilinx_axidma_tx_segment *segment; 1008 struct xilinx_axidma_desc_hw *hw; 1009 enum dma_status ret; 1010 unsigned long flags; 1011 u32 residue = 0; 1012 1013 ret = dma_cookie_status(dchan, cookie, txstate); 1014 if (ret == DMA_COMPLETE || !txstate) 1015 return ret; 1016 1017 if 
(chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 1018 spin_lock_irqsave(&chan->lock, flags); 1019 1020 desc = list_last_entry(&chan->active_list, 1021 struct xilinx_dma_tx_descriptor, node); 1022 if (chan->has_sg) { 1023 list_for_each_entry(segment, &desc->segments, node) { 1024 hw = &segment->hw; 1025 residue += (hw->control - hw->status) & 1026 chan->xdev->max_buffer_len; 1027 } 1028 } 1029 spin_unlock_irqrestore(&chan->lock, flags); 1030 1031 chan->residue = residue; 1032 dma_set_residue(txstate, chan->residue); 1033 } 1034 1035 return ret; 1036 } 1037 1038 /** 1039 * xilinx_dma_stop_transfer - Halt DMA channel 1040 * @chan: Driver specific DMA channel 1041 * 1042 * Return: '0' on success and failure value on error 1043 */ 1044 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) 1045 { 1046 u32 val; 1047 1048 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 1049 1050 /* Wait for the hardware to halt */ 1051 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1052 val & XILINX_DMA_DMASR_HALTED, 0, 1053 XILINX_DMA_LOOP_COUNT); 1054 } 1055 1056 /** 1057 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete 1058 * @chan: Driver specific DMA channel 1059 * 1060 * Return: '0' on success and failure value on error 1061 */ 1062 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) 1063 { 1064 u32 val; 1065 1066 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1067 val & XILINX_DMA_DMASR_IDLE, 0, 1068 XILINX_DMA_LOOP_COUNT); 1069 } 1070 1071 /** 1072 * xilinx_dma_start - Start DMA channel 1073 * @chan: Driver specific DMA channel 1074 */ 1075 static void xilinx_dma_start(struct xilinx_dma_chan *chan) 1076 { 1077 int err; 1078 u32 val; 1079 1080 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 1081 1082 /* Wait for the hardware to start */ 1083 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1084 !(val & XILINX_DMA_DMASR_HALTED), 0, 1085 XILINX_DMA_LOOP_COUNT); 1086 1087 if (err) { 1088 dev_err(chan->dev, "Cannot start channel %p: %x\n", 1089 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1090 1091 chan->err = true; 1092 } 1093 } 1094 1095 /** 1096 * xilinx_vdma_start_transfer - Starts VDMA transfer 1097 * @chan: Driver specific channel struct pointer 1098 */ 1099 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 1100 { 1101 struct xilinx_vdma_config *config = &chan->config; 1102 struct xilinx_dma_tx_descriptor *desc, *tail_desc; 1103 u32 reg, j; 1104 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1105 int i = 0; 1106 1107 /* This function was invoked with lock held */ 1108 if (chan->err) 1109 return; 1110 1111 if (!chan->idle) 1112 return; 1113 1114 if (list_empty(&chan->pending_list)) 1115 return; 1116 1117 desc = list_first_entry(&chan->pending_list, 1118 struct xilinx_dma_tx_descriptor, node); 1119 tail_desc = list_last_entry(&chan->pending_list, 1120 struct xilinx_dma_tx_descriptor, node); 1121 1122 /* Configure the hardware using info in the config structure */ 1123 if (chan->has_vflip) { 1124 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); 1125 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; 1126 reg |= config->vflip_en; 1127 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, 1128 reg); 1129 } 1130 1131 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1132 1133 if (config->frm_cnt_en) 1134 reg |= XILINX_DMA_DMACR_FRAMECNT_EN; 1135 else 1136 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 1137 1138 /* If not parking, enable circular 
mode */ 1139 if (config->park) 1140 reg &= ~XILINX_DMA_DMACR_CIRC_EN; 1141 else 1142 reg |= XILINX_DMA_DMACR_CIRC_EN; 1143 1144 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1145 1146 j = chan->desc_submitcount; 1147 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR); 1148 if (chan->direction == DMA_MEM_TO_DEV) { 1149 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK; 1150 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT; 1151 } else { 1152 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK; 1153 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT; 1154 } 1155 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg); 1156 1157 /* Start the hardware */ 1158 xilinx_dma_start(chan); 1159 1160 if (chan->err) 1161 return; 1162 1163 /* Start the transfer */ 1164 if (chan->desc_submitcount < chan->num_frms) 1165 i = chan->desc_submitcount; 1166 1167 list_for_each_entry(segment, &desc->segments, node) { 1168 if (chan->ext_addr) 1169 vdma_desc_write_64(chan, 1170 XILINX_VDMA_REG_START_ADDRESS_64(i++), 1171 segment->hw.buf_addr, 1172 segment->hw.buf_addr_msb); 1173 else 1174 vdma_desc_write(chan, 1175 XILINX_VDMA_REG_START_ADDRESS(i++), 1176 segment->hw.buf_addr); 1177 1178 last = segment; 1179 } 1180 1181 if (!last) 1182 return; 1183 1184 /* HW expects these parameters to be same for one transaction */ 1185 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); 1186 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 1187 last->hw.stride); 1188 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 1189 1190 chan->desc_submitcount++; 1191 chan->desc_pendingcount--; 1192 list_del(&desc->node); 1193 list_add_tail(&desc->node, &chan->active_list); 1194 if (chan->desc_submitcount == chan->num_frms) 1195 chan->desc_submitcount = 0; 1196 1197 chan->idle = false; 1198 } 1199 1200 /** 1201 * xilinx_cdma_start_transfer - Starts cdma transfer 1202 * @chan: Driver specific channel struct pointer 1203 */ 1204 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) 1205 { 1206 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1207 struct xilinx_cdma_tx_segment *tail_segment; 1208 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); 1209 1210 if (chan->err) 1211 return; 1212 1213 if (!chan->idle) 1214 return; 1215 1216 if (list_empty(&chan->pending_list)) 1217 return; 1218 1219 head_desc = list_first_entry(&chan->pending_list, 1220 struct xilinx_dma_tx_descriptor, node); 1221 tail_desc = list_last_entry(&chan->pending_list, 1222 struct xilinx_dma_tx_descriptor, node); 1223 tail_segment = list_last_entry(&tail_desc->segments, 1224 struct xilinx_cdma_tx_segment, node); 1225 1226 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1227 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1228 ctrl_reg |= chan->desc_pendingcount << 1229 XILINX_DMA_CR_COALESCE_SHIFT; 1230 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); 1231 } 1232 1233 if (chan->has_sg) { 1234 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 1235 XILINX_CDMA_CR_SGMODE); 1236 1237 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1238 XILINX_CDMA_CR_SGMODE); 1239 1240 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1241 head_desc->async_tx.phys); 1242 1243 /* Update tail ptr register which will start the transfer */ 1244 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1245 tail_segment->phys); 1246 } else { 1247 /* In simple mode */ 1248 struct xilinx_cdma_tx_segment *segment; 1249 struct xilinx_cdma_desc_hw *hw; 1250 1251 segment = list_first_entry(&head_desc->segments, 1252 struct xilinx_cdma_tx_segment, 1253 node); 1254 1255 hw = &segment->hw; 1256 1257 xilinx_write(chan, 
XILINX_CDMA_REG_SRCADDR, 1258 xilinx_prep_dma_addr_t(hw->src_addr)); 1259 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, 1260 xilinx_prep_dma_addr_t(hw->dest_addr)); 1261 1262 /* Start the transfer */ 1263 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1264 hw->control & chan->xdev->max_buffer_len); 1265 } 1266 1267 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1268 chan->desc_pendingcount = 0; 1269 chan->idle = false; 1270 } 1271 1272 /** 1273 * xilinx_dma_start_transfer - Starts DMA transfer 1274 * @chan: Driver specific channel struct pointer 1275 */ 1276 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) 1277 { 1278 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1279 struct xilinx_axidma_tx_segment *tail_segment; 1280 u32 reg; 1281 1282 if (chan->err) 1283 return; 1284 1285 if (list_empty(&chan->pending_list)) 1286 return; 1287 1288 if (!chan->idle) 1289 return; 1290 1291 head_desc = list_first_entry(&chan->pending_list, 1292 struct xilinx_dma_tx_descriptor, node); 1293 tail_desc = list_last_entry(&chan->pending_list, 1294 struct xilinx_dma_tx_descriptor, node); 1295 tail_segment = list_last_entry(&tail_desc->segments, 1296 struct xilinx_axidma_tx_segment, node); 1297 1298 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1299 1300 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1301 reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1302 reg |= chan->desc_pendingcount << 1303 XILINX_DMA_CR_COALESCE_SHIFT; 1304 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1305 } 1306 1307 if (chan->has_sg && !chan->xdev->mcdma) 1308 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1309 head_desc->async_tx.phys); 1310 1311 if (chan->has_sg && chan->xdev->mcdma) { 1312 if (chan->direction == DMA_MEM_TO_DEV) { 1313 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1314 head_desc->async_tx.phys); 1315 } else { 1316 if (!chan->tdest) { 1317 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1318 head_desc->async_tx.phys); 1319 } else { 1320 dma_ctrl_write(chan, 1321 XILINX_DMA_MCRX_CDESC(chan->tdest), 1322 head_desc->async_tx.phys); 1323 } 1324 } 1325 } 1326 1327 xilinx_dma_start(chan); 1328 1329 if (chan->err) 1330 return; 1331 1332 /* Start the transfer */ 1333 if (chan->has_sg && !chan->xdev->mcdma) { 1334 if (chan->cyclic) 1335 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1336 chan->cyclic_seg_v->phys); 1337 else 1338 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1339 tail_segment->phys); 1340 } else if (chan->has_sg && chan->xdev->mcdma) { 1341 if (chan->direction == DMA_MEM_TO_DEV) { 1342 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1343 tail_segment->phys); 1344 } else { 1345 if (!chan->tdest) { 1346 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1347 tail_segment->phys); 1348 } else { 1349 dma_ctrl_write(chan, 1350 XILINX_DMA_MCRX_TDESC(chan->tdest), 1351 tail_segment->phys); 1352 } 1353 } 1354 } else { 1355 struct xilinx_axidma_tx_segment *segment; 1356 struct xilinx_axidma_desc_hw *hw; 1357 1358 segment = list_first_entry(&head_desc->segments, 1359 struct xilinx_axidma_tx_segment, 1360 node); 1361 hw = &segment->hw; 1362 1363 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); 1364 1365 /* Start the transfer */ 1366 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1367 hw->control & chan->xdev->max_buffer_len); 1368 } 1369 1370 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1371 chan->desc_pendingcount = 0; 1372 chan->idle = false; 1373 } 1374 1375 /** 1376 * xilinx_dma_issue_pending - Issue pending transactions 1377 * @dchan: DMA channel 1378 */ 1379 static void 
xilinx_dma_issue_pending(struct dma_chan *dchan) 1380 { 1381 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1382 unsigned long flags; 1383 1384 spin_lock_irqsave(&chan->lock, flags); 1385 chan->start_transfer(chan); 1386 spin_unlock_irqrestore(&chan->lock, flags); 1387 } 1388 1389 /** 1390 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete 1391 * @chan : xilinx DMA channel 1392 * 1393 * CONTEXT: hardirq 1394 */ 1395 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) 1396 { 1397 struct xilinx_dma_tx_descriptor *desc, *next; 1398 1399 /* This function was invoked with lock held */ 1400 if (list_empty(&chan->active_list)) 1401 return; 1402 1403 list_for_each_entry_safe(desc, next, &chan->active_list, node) { 1404 list_del(&desc->node); 1405 if (!desc->cyclic) 1406 dma_cookie_complete(&desc->async_tx); 1407 list_add_tail(&desc->node, &chan->done_list); 1408 } 1409 } 1410 1411 /** 1412 * xilinx_dma_reset - Reset DMA channel 1413 * @chan: Driver specific DMA channel 1414 * 1415 * Return: '0' on success and failure value on error 1416 */ 1417 static int xilinx_dma_reset(struct xilinx_dma_chan *chan) 1418 { 1419 int err; 1420 u32 tmp; 1421 1422 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); 1423 1424 /* Wait for the hardware to finish reset */ 1425 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, 1426 !(tmp & XILINX_DMA_DMACR_RESET), 0, 1427 XILINX_DMA_LOOP_COUNT); 1428 1429 if (err) { 1430 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 1431 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), 1432 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1433 return -ETIMEDOUT; 1434 } 1435 1436 chan->err = false; 1437 chan->idle = true; 1438 chan->desc_submitcount = 0; 1439 1440 return err; 1441 } 1442 1443 /** 1444 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts 1445 * @chan: Driver specific DMA channel 1446 * 1447 * Return: '0' on success and failure value on error 1448 */ 1449 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) 1450 { 1451 int err; 1452 1453 /* Reset VDMA */ 1454 err = xilinx_dma_reset(chan); 1455 if (err) 1456 return err; 1457 1458 /* Enable interrupts */ 1459 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1460 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1461 1462 return 0; 1463 } 1464 1465 /** 1466 * xilinx_dma_irq_handler - DMA Interrupt handler 1467 * @irq: IRQ number 1468 * @data: Pointer to the Xilinx DMA channel structure 1469 * 1470 * Return: IRQ_HANDLED/IRQ_NONE 1471 */ 1472 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) 1473 { 1474 struct xilinx_dma_chan *chan = data; 1475 u32 status; 1476 1477 /* Read the status and ack the interrupts. */ 1478 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); 1479 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) 1480 return IRQ_NONE; 1481 1482 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1483 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1484 1485 if (status & XILINX_DMA_DMASR_ERR_IRQ) { 1486 /* 1487 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 1488 * error is recoverable, ignore it. Otherwise flag the error. 1489 * 1490 * Only recoverable errors can be cleared in the DMASR register, 1491 * make sure not to write to other error bits to 1. 
1492 */ 1493 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; 1494 1495 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1496 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); 1497 1498 if (!chan->flush_on_fsync || 1499 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { 1500 dev_err(chan->dev, 1501 "Channel %p has errors %x, cdr %x tdr %x\n", 1502 chan, errors, 1503 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), 1504 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); 1505 chan->err = true; 1506 } 1507 } 1508 1509 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { 1510 /* 1511 * Device takes too long to do the transfer when user requires 1512 * responsiveness. 1513 */ 1514 dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1515 } 1516 1517 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { 1518 spin_lock(&chan->lock); 1519 xilinx_dma_complete_descriptor(chan); 1520 chan->idle = true; 1521 chan->start_transfer(chan); 1522 spin_unlock(&chan->lock); 1523 } 1524 1525 tasklet_schedule(&chan->tasklet); 1526 return IRQ_HANDLED; 1527 } 1528 1529 /** 1530 * append_desc_queue - Queuing descriptor 1531 * @chan: Driver specific dma channel 1532 * @desc: dma transaction descriptor 1533 */ 1534 static void append_desc_queue(struct xilinx_dma_chan *chan, 1535 struct xilinx_dma_tx_descriptor *desc) 1536 { 1537 struct xilinx_vdma_tx_segment *tail_segment; 1538 struct xilinx_dma_tx_descriptor *tail_desc; 1539 struct xilinx_axidma_tx_segment *axidma_tail_segment; 1540 struct xilinx_cdma_tx_segment *cdma_tail_segment; 1541 1542 if (list_empty(&chan->pending_list)) 1543 goto append; 1544 1545 /* 1546 * Add the hardware descriptor to the chain of hardware descriptors 1547 * that already exists in memory. 1548 */ 1549 tail_desc = list_last_entry(&chan->pending_list, 1550 struct xilinx_dma_tx_descriptor, node); 1551 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 1552 tail_segment = list_last_entry(&tail_desc->segments, 1553 struct xilinx_vdma_tx_segment, 1554 node); 1555 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1556 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1557 cdma_tail_segment = list_last_entry(&tail_desc->segments, 1558 struct xilinx_cdma_tx_segment, 1559 node); 1560 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1561 } else { 1562 axidma_tail_segment = list_last_entry(&tail_desc->segments, 1563 struct xilinx_axidma_tx_segment, 1564 node); 1565 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1566 } 1567 1568 /* 1569 * Add the software descriptor and all children to the list 1570 * of pending transactions 1571 */ 1572 append: 1573 list_add_tail(&desc->node, &chan->pending_list); 1574 chan->desc_pendingcount++; 1575 1576 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) 1577 && unlikely(chan->desc_pendingcount > chan->num_frms)) { 1578 dev_dbg(chan->dev, "desc pendingcount is too high\n"); 1579 chan->desc_pendingcount = chan->num_frms; 1580 } 1581 } 1582 1583 /** 1584 * xilinx_dma_tx_submit - Submit DMA transaction 1585 * @tx: Async transaction descriptor 1586 * 1587 * Return: cookie value on success and failure value on error 1588 */ 1589 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 1590 { 1591 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); 1592 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); 1593 dma_cookie_t cookie; 1594 unsigned long flags; 1595 int err; 1596 1597 if (chan->cyclic) { 1598 xilinx_dma_free_tx_descriptor(chan, desc); 1599 return -EBUSY; 1600 } 1601 1602 if 
(chan->err) { 1603 /* 1604 * If reset fails, need to hard reset the system. 1605 * Channel is no longer functional 1606 */ 1607 err = xilinx_dma_chan_reset(chan); 1608 if (err < 0) 1609 return err; 1610 } 1611 1612 spin_lock_irqsave(&chan->lock, flags); 1613 1614 cookie = dma_cookie_assign(tx); 1615 1616 /* Put this transaction onto the tail of the pending queue */ 1617 append_desc_queue(chan, desc); 1618 1619 if (desc->cyclic) 1620 chan->cyclic = true; 1621 1622 spin_unlock_irqrestore(&chan->lock, flags); 1623 1624 return cookie; 1625 } 1626 1627 /** 1628 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a 1629 * DMA_SLAVE transaction 1630 * @dchan: DMA channel 1631 * @xt: Interleaved template pointer 1632 * @flags: transfer ack flags 1633 * 1634 * Return: Async transaction descriptor on success and NULL on failure 1635 */ 1636 static struct dma_async_tx_descriptor * 1637 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, 1638 struct dma_interleaved_template *xt, 1639 unsigned long flags) 1640 { 1641 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1642 struct xilinx_dma_tx_descriptor *desc; 1643 struct xilinx_vdma_tx_segment *segment; 1644 struct xilinx_vdma_desc_hw *hw; 1645 1646 if (!is_slave_direction(xt->dir)) 1647 return NULL; 1648 1649 if (!xt->numf || !xt->sgl[0].size) 1650 return NULL; 1651 1652 if (xt->frame_size != 1) 1653 return NULL; 1654 1655 /* Allocate a transaction descriptor. */ 1656 desc = xilinx_dma_alloc_tx_descriptor(chan); 1657 if (!desc) 1658 return NULL; 1659 1660 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1661 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1662 async_tx_ack(&desc->async_tx); 1663 1664 /* Allocate the link descriptor from DMA pool */ 1665 segment = xilinx_vdma_alloc_tx_segment(chan); 1666 if (!segment) 1667 goto error; 1668 1669 /* Fill in the hardware descriptor */ 1670 hw = &segment->hw; 1671 hw->vsize = xt->numf; 1672 hw->hsize = xt->sgl[0].size; 1673 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 1674 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; 1675 hw->stride |= chan->config.frm_dly << 1676 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 1677 1678 if (xt->dir != DMA_MEM_TO_DEV) { 1679 if (chan->ext_addr) { 1680 hw->buf_addr = lower_32_bits(xt->dst_start); 1681 hw->buf_addr_msb = upper_32_bits(xt->dst_start); 1682 } else { 1683 hw->buf_addr = xt->dst_start; 1684 } 1685 } else { 1686 if (chan->ext_addr) { 1687 hw->buf_addr = lower_32_bits(xt->src_start); 1688 hw->buf_addr_msb = upper_32_bits(xt->src_start); 1689 } else { 1690 hw->buf_addr = xt->src_start; 1691 } 1692 } 1693 1694 /* Insert the segment into the descriptor segments list. */ 1695 list_add_tail(&segment->node, &desc->segments); 1696 1697 /* Link the last hardware descriptor with the first. 
*/ 1698 segment = list_first_entry(&desc->segments, 1699 struct xilinx_vdma_tx_segment, node); 1700 desc->async_tx.phys = segment->phys; 1701 1702 return &desc->async_tx; 1703 1704 error: 1705 xilinx_dma_free_tx_descriptor(chan, desc); 1706 return NULL; 1707 } 1708 1709 /** 1710 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction 1711 * @dchan: DMA channel 1712 * @dma_dst: destination address 1713 * @dma_src: source address 1714 * @len: transfer length 1715 * @flags: transfer ack flags 1716 * 1717 * Return: Async transaction descriptor on success and NULL on failure 1718 */ 1719 static struct dma_async_tx_descriptor * 1720 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, 1721 dma_addr_t dma_src, size_t len, unsigned long flags) 1722 { 1723 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1724 struct xilinx_dma_tx_descriptor *desc; 1725 struct xilinx_cdma_tx_segment *segment; 1726 struct xilinx_cdma_desc_hw *hw; 1727 1728 if (!len || len > chan->xdev->max_buffer_len) 1729 return NULL; 1730 1731 desc = xilinx_dma_alloc_tx_descriptor(chan); 1732 if (!desc) 1733 return NULL; 1734 1735 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1736 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1737 1738 /* Allocate the link descriptor from DMA pool */ 1739 segment = xilinx_cdma_alloc_tx_segment(chan); 1740 if (!segment) 1741 goto error; 1742 1743 hw = &segment->hw; 1744 hw->control = len; 1745 hw->src_addr = dma_src; 1746 hw->dest_addr = dma_dst; 1747 if (chan->ext_addr) { 1748 hw->src_addr_msb = upper_32_bits(dma_src); 1749 hw->dest_addr_msb = upper_32_bits(dma_dst); 1750 } 1751 1752 /* Insert the segment into the descriptor segments list. */ 1753 list_add_tail(&segment->node, &desc->segments); 1754 1755 desc->async_tx.phys = segment->phys; 1756 hw->next_desc = segment->phys; 1757 1758 return &desc->async_tx; 1759 1760 error: 1761 xilinx_dma_free_tx_descriptor(chan, desc); 1762 return NULL; 1763 } 1764 1765 /** 1766 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 1767 * @dchan: DMA channel 1768 * @sgl: scatterlist to transfer to/from 1769 * @sg_len: number of entries in @scatterlist 1770 * @direction: DMA direction 1771 * @flags: transfer ack flags 1772 * @context: APP words of the descriptor 1773 * 1774 * Return: Async transaction descriptor on success and NULL on failure 1775 */ 1776 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( 1777 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 1778 enum dma_transfer_direction direction, unsigned long flags, 1779 void *context) 1780 { 1781 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1782 struct xilinx_dma_tx_descriptor *desc; 1783 struct xilinx_axidma_tx_segment *segment = NULL; 1784 u32 *app_w = (u32 *)context; 1785 struct scatterlist *sg; 1786 size_t copy; 1787 size_t sg_used; 1788 unsigned int i; 1789 1790 if (!is_slave_direction(direction)) 1791 return NULL; 1792 1793 /* Allocate a transaction descriptor. 
*/ 1794 desc = xilinx_dma_alloc_tx_descriptor(chan); 1795 if (!desc) 1796 return NULL; 1797 1798 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1799 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1800 1801 /* Build transactions using information in the scatter gather list */ 1802 for_each_sg(sgl, sg, sg_len, i) { 1803 sg_used = 0; 1804 1805 /* Loop until the entire scatterlist entry is used */ 1806 while (sg_used < sg_dma_len(sg)) { 1807 struct xilinx_axidma_desc_hw *hw; 1808 1809 /* Get a free segment */ 1810 segment = xilinx_axidma_alloc_tx_segment(chan); 1811 if (!segment) 1812 goto error; 1813 1814 /* 1815 * Calculate the maximum number of bytes to transfer, 1816 * making sure it is less than the hw limit 1817 */ 1818 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), 1819 sg_used); 1820 hw = &segment->hw; 1821 1822 /* Fill in the descriptor */ 1823 xilinx_axidma_buf(chan, hw, sg_dma_address(sg), 1824 sg_used, 0); 1825 1826 hw->control = copy; 1827 1828 if (chan->direction == DMA_MEM_TO_DEV) { 1829 if (app_w) 1830 memcpy(hw->app, app_w, sizeof(u32) * 1831 XILINX_DMA_NUM_APP_WORDS); 1832 } 1833 1834 sg_used += copy; 1835 1836 /* 1837 * Insert the segment into the descriptor segments 1838 * list. 1839 */ 1840 list_add_tail(&segment->node, &desc->segments); 1841 } 1842 } 1843 1844 segment = list_first_entry(&desc->segments, 1845 struct xilinx_axidma_tx_segment, node); 1846 desc->async_tx.phys = segment->phys; 1847 1848 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1849 if (chan->direction == DMA_MEM_TO_DEV) { 1850 segment->hw.control |= XILINX_DMA_BD_SOP; 1851 segment = list_last_entry(&desc->segments, 1852 struct xilinx_axidma_tx_segment, 1853 node); 1854 segment->hw.control |= XILINX_DMA_BD_EOP; 1855 } 1856 1857 return &desc->async_tx; 1858 1859 error: 1860 xilinx_dma_free_tx_descriptor(chan, desc); 1861 return NULL; 1862 } 1863 1864 /** 1865 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 1866 * @dchan: DMA channel 1867 * @buf_addr: Physical address of the buffer 1868 * @buf_len: Total length of the cyclic buffers 1869 * @period_len: length of individual cyclic buffer 1870 * @direction: DMA direction 1871 * @flags: transfer ack flags 1872 * 1873 * Return: Async transaction descriptor on success and NULL on failure 1874 */ 1875 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 1876 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 1877 size_t period_len, enum dma_transfer_direction direction, 1878 unsigned long flags) 1879 { 1880 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1881 struct xilinx_dma_tx_descriptor *desc; 1882 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; 1883 size_t copy, sg_used; 1884 unsigned int num_periods; 1885 int i; 1886 u32 reg; 1887 1888 if (!period_len) 1889 return NULL; 1890 1891 num_periods = buf_len / period_len; 1892 1893 if (!num_periods) 1894 return NULL; 1895 1896 if (!is_slave_direction(direction)) 1897 return NULL; 1898 1899 /* Allocate a transaction descriptor. 

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/*
	 * For MEM_TO_DEV transfers, set SOP on the first descriptor and
	 * EOP on the last one.
	 */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
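
/*
 * Illustrative consumer-side sketch (not part of this driver): a cyclic
 * (ring-buffer) transfer as used by audio-style clients.  The callback fires
 * once per period until the channel is terminated.  The helper name and the
 * argument layout are assumptions for the example.
 *
 *	#include <linux/dmaengine.h>
 *
 *	static int example_start_ring(struct dma_chan *chan, dma_addr_t buf,
 *				      size_t buf_len, size_t period_len,
 *				      dma_async_tx_callback period_done,
 *				      void *ctx)
 *	{
 *		struct dma_async_tx_descriptor *tx;
 *
 *		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					       DMA_DEV_TO_MEM,
 *					       DMA_PREP_INTERRUPT);
 *		if (!tx)
 *			return -EIO;
 *
 *		tx->callback = period_done;	// invoked after every period
 *		tx->callback_param = ctx;
 *
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 *
 * The ring keeps running until the client calls dmaengine_terminate_sync(),
 * which ends up in xilinx_dma_terminate_all() below.
 */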

/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for an
 * interleaved DMA transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/*
	 * For MEM_TO_DEV transfers, set SOP on the first descriptor and
	 * EOP on the last one.
	 */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
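
/*
 * Illustrative consumer-side sketch (not part of this driver): describing a
 * single-plane 2-D transfer (numf lines of hsize bytes, separated by a line
 * stride) with a dma_interleaved_template, which the prep routine above maps
 * to the descriptor's vsize/hsize/stride fields.  The function name and
 * parameters are assumptions for the example only.
 *
 *	#include <linux/dmaengine.h>
 *	#include <linux/slab.h>
 *
 *	static int example_prep_2d(struct dma_chan *chan, dma_addr_t frame,
 *				   size_t hsize, size_t stride, size_t numf)
 *	{
 *		struct dma_interleaved_template *xt;
 *		struct dma_async_tx_descriptor *tx;
 *
 *		xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk),
 *			     GFP_KERNEL);
 *		if (!xt)
 *			return -ENOMEM;
 *
 *		xt->dir = DMA_MEM_TO_DEV;
 *		xt->src_start = frame;
 *		xt->numf = numf;		// number of lines (vsize)
 *		xt->frame_size = 1;		// this driver accepts one chunk
 *		xt->sgl[0].size = hsize;	// bytes per line
 *		xt->sgl[0].icg = stride - hsize; // gap between lines
 *
 *		tx = dmaengine_prep_interleaved_dma(chan, xt,
 *						    DMA_PREP_INTERRUPT);
 *		kfree(xt);		// the template is consumed during prep
 *		if (!tx)
 *			return -EIO;
 *
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */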

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
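
/*
 * Illustrative consumer-side sketch (not part of this driver): a video
 * client tuning the VDMA channel through the exported helper above.  The
 * chosen values are assumptions for the example only.
 *
 *	#include <linux/dma/xilinx_dma.h>
 *
 *	static int example_vdma_setup(struct dma_chan *chan)
 *	{
 *		struct xilinx_vdma_config cfg = {};
 *
 *		cfg.coalesc = 16;	// frame-count interrupt threshold
 *		cfg.delay = 0;		// no inter-frame delay interrupt
 *		cfg.park = 0;		// keep circulating through all frames
 *		cfg.gen_lock = 1;	// applied only if the channel has genlock
 *		cfg.master = 0;
 *
 *		return xilinx_vdma_channel_set_config(chan, &cfg);
 *	}
 */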

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
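
/*
 * Side note, illustrative only: the three clk_init variants above follow the
 * same get/prepare_enable/unwind pattern.  A hedged sketch of the AXI DMA
 * case expressed with the clk_bulk helpers is shown below; unlike the code
 * above it treats every listed clock as required, so it is not a drop-in
 * replacement, just an example of the bulk API.
 *
 *	#include <linux/clk.h>
 *
 *	static struct clk_bulk_data example_axidma_clks[] = {
 *		{ .id = "s_axi_lite_aclk" },
 *		{ .id = "m_axi_mm2s_aclk" },
 *		{ .id = "m_axi_s2mm_aclk" },
 *		{ .id = "m_axi_sg_aclk" },
 *	};
 *
 *	static int example_axidma_clk_init(struct platform_device *pdev)
 *	{
 *		int err;
 *
 *		err = devm_clk_bulk_get(&pdev->dev,
 *					ARRAY_SIZE(example_axidma_clks),
 *					example_axidma_clks);
 *		if (err)
 *			return err;
 *
 *		return clk_bulk_prepare_enable(ARRAY_SIZE(example_axidma_clks),
 *					       example_axidma_clks);
 *	}
 *
 *	static void example_axidma_clk_exit(void)
 *	{
 *		clk_bulk_disable_unprepare(ARRAY_SIZE(example_axidma_clks),
 *					   example_axidma_clks);
 *	}
 */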

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This flag ensures that descriptors are not submitted while the
	 * DMA engine is still busy. It is used to avoid polling a status
	 * register bit to learn the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* check if SG is enabled (only for AXIDMA and CDMA) */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
2539 */ 2540 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 2541 struct device_node *node) 2542 { 2543 int ret, i, nr_channels = 1; 2544 2545 ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2546 if ((ret < 0) && xdev->mcdma) 2547 dev_warn(xdev->dev, "missing dma-channels property\n"); 2548 2549 for (i = 0; i < nr_channels; i++) 2550 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); 2551 2552 xdev->nr_channels += nr_channels; 2553 2554 return 0; 2555 } 2556 2557 /** 2558 * of_dma_xilinx_xlate - Translation function 2559 * @dma_spec: Pointer to DMA specifier as found in the device tree 2560 * @ofdma: Pointer to DMA controller data 2561 * 2562 * Return: DMA channel pointer on success and NULL on error 2563 */ 2564 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 2565 struct of_dma *ofdma) 2566 { 2567 struct xilinx_dma_device *xdev = ofdma->of_dma_data; 2568 int chan_id = dma_spec->args[0]; 2569 2570 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) 2571 return NULL; 2572 2573 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 2574 } 2575 2576 static const struct xilinx_dma_config axidma_config = { 2577 .dmatype = XDMA_TYPE_AXIDMA, 2578 .clk_init = axidma_clk_init, 2579 }; 2580 2581 static const struct xilinx_dma_config axicdma_config = { 2582 .dmatype = XDMA_TYPE_CDMA, 2583 .clk_init = axicdma_clk_init, 2584 }; 2585 2586 static const struct xilinx_dma_config axivdma_config = { 2587 .dmatype = XDMA_TYPE_VDMA, 2588 .clk_init = axivdma_clk_init, 2589 }; 2590 2591 static const struct of_device_id xilinx_dma_of_ids[] = { 2592 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 2593 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 2594 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 2595 {} 2596 }; 2597 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); 2598 2599 /** 2600 * xilinx_dma_probe - Driver probe function 2601 * @pdev: Pointer to the platform_device structure 2602 * 2603 * Return: '0' on success and failure value on error 2604 */ 2605 static int xilinx_dma_probe(struct platform_device *pdev) 2606 { 2607 int (*clk_init)(struct platform_device *, struct clk **, struct clk **, 2608 struct clk **, struct clk **, struct clk **) 2609 = axivdma_clk_init; 2610 struct device_node *node = pdev->dev.of_node; 2611 struct xilinx_dma_device *xdev; 2612 struct device_node *child, *np = pdev->dev.of_node; 2613 struct resource *io; 2614 u32 num_frames, addr_width, len_width; 2615 int i, err; 2616 2617 /* Allocate and initialize the DMA engine structure */ 2618 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 2619 if (!xdev) 2620 return -ENOMEM; 2621 2622 xdev->dev = &pdev->dev; 2623 if (np) { 2624 const struct of_device_id *match; 2625 2626 match = of_match_node(xilinx_dma_of_ids, np); 2627 if (match && match->data) { 2628 xdev->dma_config = match->data; 2629 clk_init = xdev->dma_config->clk_init; 2630 } 2631 } 2632 2633 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, 2634 &xdev->rx_clk, &xdev->rxs_clk); 2635 if (err) 2636 return err; 2637 2638 /* Request and map I/O memory */ 2639 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2640 xdev->regs = devm_ioremap_resource(&pdev->dev, io); 2641 if (IS_ERR(xdev->regs)) 2642 return PTR_ERR(xdev->regs); 2643 2644 /* Retrieve the DMA engine properties from the device tree */ 2645 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); 2646 2647 if (xdev->dma_config->dmatype == 

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported only by AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");