// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL			0x000
#define XILINX_DPDMA_ISR			0x004
#define XILINX_DPDMA_IMR			0x008
#define XILINX_DPDMA_IEN			0x00c
#define XILINX_DPDMA_IDS			0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n)		BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK	GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n)		BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK	GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n)		BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK		GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n)		BIT((n) + 18)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK		GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL	BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL	BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS		BIT(26)
#define XILINX_DPDMA_INTR_VSYNC			BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK		0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR		0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR		0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL		0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK		0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK		0x0f000000
#define XILINX_DPDMA_INTR_ALL			0x0fffffff
#define XILINX_DPDMA_EISR			0x014
#define XILINX_DPDMA_EIMR			0x018
#define XILINX_DPDMA_EIEN			0x01c
#define XILINX_DPDMA_EIDS			0x020
#define XILINX_DPDMA_EINTR_INV_APB		BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)	BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK	GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n)		BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK		GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n)		BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK		GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)	BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK	GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)	BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK	GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL	BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK	0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR		0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR		0x80000001
#define XILINX_DPDMA_EINTR_ALL			0xffffffff
#define XILINX_DPDMA_CNTL			0x100
#define XILINX_DPDMA_GBL			0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n)		((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n)		((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL			0x108
#define XILINX_DPDMA_ALC0_STATUS		0x10c
#define XILINX_DPDMA_ALC0_MAX			0x110
#define XILINX_DPDMA_ALC0_MIN			0x114
#define XILINX_DPDMA_ALC0_ACC			0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN		0x11c
#define XILINX_DPDMA_ALC1_CNTL			0x120
#define XILINX_DPDMA_ALC1_STATUS		0x124
#define XILINX_DPDMA_ALC1_MAX			0x128
#define XILINX_DPDMA_ALC1_MIN			0x12c
#define XILINX_DPDMA_ALC1_ACC			0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN		0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE			0x200
#define XILINX_DPDMA_CH_OFFSET			0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE	0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR		0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE		0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR		0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE		0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR		0x014
#define XILINX_DPDMA_CH_CNTL			0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE		BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE		BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK	GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK	GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK	GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS	11
#define XILINX_DPDMA_CH_STATUS			0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK	GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO			0x020
#define XILINX_DPDMA_CH_PYLD_SZ			0x024
#define XILINX_DPDMA_CH_DESC_ID			0x028
#define XILINX_DPDMA_CH_DESC_ID_MASK		GENMASK(15, 0)

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES		256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS	128

#define XILINX_DPDMA_NUM_CHAN			6

struct xilinx_dpdma_chan;

/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bits of the 48 bit addresses (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
	u32 control;
	u32 desc_id;
	u32 xfer_size;
	u32 hsize_stride;
	u32 timestamp_lsb;
	u32 timestamp_msb;
	u32 addr_ext;
	u32 next_desc;
	u32 src_addr;
	u32 addr_ext_23;
	u32 addr_ext_45;
	u32 src_addr2;
	u32 src_addr3;
	u32 src_addr4;
	u32 src_addr5;
	u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);
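
/*
 * Note on extended addressing: when the driver runs with a 64-bit dma_addr_t
 * (xdev->ext_addr), the hardware descriptor carries 48-bit addresses, with the
 * low 32 bits stored in next_desc / src_addr and the high 16 bits packed into
 * addr_ext (see XILINX_DPDMA_DESC_ADDR_EXT_*_MASK). For example, with the
 * purely illustrative source address 0x0000_1234_89ab_cdef:
 *
 *	src_addr        = 0x89abcdef
 *	addr_ext[31:16] = 0x1234
 *
 * xilinx_dpdma_sw_desc_set_dma_addrs() below performs this split using
 * lower_32_bits()/upper_32_bits() and FIELD_PREP().
 */
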
/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
	dma_addr_t dma_addr;
};

/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;
	struct xilinx_dpdma_chan *chan;
	struct list_head descriptors;
	bool error;
};

#define to_dpdma_tx_desc(_desc) \
	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)

/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: References to descriptors being processed
 * @desc.pending: Descriptor scheduled to the hardware, pending execution
 * @desc.active: Descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;
	void __iomem *reg;
	unsigned int id;

	wait_queue_head_t wait_to_stop;
	bool running;
	bool first_frame;
	bool video_group;

	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
	struct dma_pool *desc_pool;
	struct tasklet_struct err_task;

	struct {
		struct xilinx_dpdma_tx_desc *pending;
		struct xilinx_dpdma_tx_desc *active;
	} desc;

	struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)

/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
	struct dma_device common;
	void __iomem *reg;
	struct device *dev;
	int irq;

	struct clk *axi_clk;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

	bool ext_addr;
};

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS

#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {
	DPDMA_TC_INTR_DONE,
	DPDMA_TC_NONE
};

struct xilinx_dpdma_debugfs {
	enum xilinx_dpdma_testcases testcase;
	u16 xilinx_dpdma_irq_done_count;
	unsigned int chan_id;
};

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
	const char *name;
	enum xilinx_dpdma_testcases tc;
	ssize_t (*read)(char *buf);
	int (*write)(char *args);
};

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
	if (chan->id == dpdma_debugfs.chan_id)
		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
}

static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
{
	size_t out_str_len;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
			    out_str_len);
	snprintf(buf, out_str_len, "%d",
		 dpdma_debugfs.xilinx_dpdma_irq_done_count);

	return 0;
}

static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
{
	char *arg;
	int ret;
	u32 id;

	arg = strsep(&args, " ");
	if (!arg || strncasecmp(arg, "start", 5))
		return -EINVAL;

	arg = strsep(&args, " ");
	if (!arg)
		return -EINVAL;

	ret = kstrtou32(arg, 0, &id);
	if (ret < 0)
		return ret;

	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
		return -EINVAL;

	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
	dpdma_debugfs.chan_id = id;

	return 0;
}

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
	{
		.name = "DESCRIPTOR_DONE_INTR",
		.tc = DPDMA_TC_INTR_DONE,
		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
	},
};

static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	enum xilinx_dpdma_testcases testcase;
	char *kern_buff;
	int ret = 0;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
	if (!kern_buff) {
		dpdma_debugfs.testcase = DPDMA_TC_NONE;
		return -ENOMEM;
	}

	testcase = READ_ONCE(dpdma_debugfs.testcase);
	if (testcase != DPDMA_TC_NONE) {
		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
		if (ret < 0)
			goto done;
	} else {
		strlcpy(kern_buff, "No testcase executed",
			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
	}

	size = min(size, strlen(kern_buff));
	if (copy_to_user(buf, kern_buff, size))
		ret = -EFAULT;

done:
	kfree(kern_buff);
	if (ret)
		return ret;

	*pos = size + 1;
	return size;
}

static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
					  const char __user *buf, size_t size,
					  loff_t *pos)
{
	char *kern_buff, *kern_buff_start;
	char *testcase;
	unsigned int i;
	int ret;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	/* Supporting single instance of test as of now. */
	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
		return -EBUSY;

	kern_buff = kzalloc(size, GFP_KERNEL);
	if (!kern_buff)
		return -ENOMEM;
	kern_buff_start = kern_buff;

	ret = strncpy_from_user(kern_buff, buf, size);
	if (ret < 0)
		goto done;

	/* Read the testcase name from a user request. */
	testcase = strsep(&kern_buff, " ");

	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
			break;
	}

	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
		ret = -EINVAL;
		goto done;
	}

	ret = dpdma_debugfs_reqs[i].write(kern_buff);
	if (ret < 0)
		goto done;

	ret = size;

done:
	kfree(kern_buff_start);
	return ret;
}

static const struct file_operations fops_xilinx_dpdma_dbgfs = {
	.owner = THIS_MODULE,
	.read = xilinx_dpdma_debugfs_read,
	.write = xilinx_dpdma_debugfs_write,
};

static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
	struct dentry *dent;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
				   NULL, &fops_xilinx_dpdma_dbgfs);
	if (IS_ERR(dent))
		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
}

#else
static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
}

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
}
#endif /* CONFIG_DEBUG_FS */
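
/*
 * Usage note (illustrative): with CONFIG_DEBUG_FS enabled, the DONE interrupt
 * counter test above is driven through the "testcase" file created by
 * xilinx_dpdma_debugfs_init(). Writing the string
 *
 *	"DESCRIPTOR_DONE_INTR start <chan>"
 *
 * (with <chan> between ZYNQMP_DPDMA_VIDEO0 and ZYNQMP_DPDMA_AUDIO1) arms
 * DPDMA_TC_INTR_DONE for that channel and resets the counter; a subsequent
 * read returns the number of DONE interrupts counted since then and disarms
 * the testcase.
 */
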
/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
	return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev, its
 * next descriptor DMA address is set to the DMA address of @sw_desc. @prev may
 * be identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
					       struct xilinx_dpdma_sw_desc *sw_desc,
					       struct xilinx_dpdma_sw_desc *prev,
					       dma_addr_t dma_addr[],
					       unsigned int num_src_addr)
{
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	unsigned int i;

	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
	if (xdev->ext_addr)
		hw_desc->addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
				   upper_32_bits(dma_addr[0]));

	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;

		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}

/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump contents of a tx descriptor
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 *					    descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * based on @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev,
			"chan%u: buffer should be aligned at %d B\n",
			chan->id, XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}
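
/*
 * Worked example (illustrative numbers only): for a 1920x1080 frame with
 * 32 bits per pixel and no padding between lines, a client would set
 * xt->sgl[0].size = 1920 * 4 = 7680 bytes, xt->sgl[0].icg = 0 and
 * xt->numf = 1080. The code above then programs hsize = 7680 (already a
 * multiple of XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8 = 16 bytes),
 * xfer_size = 7680 * 1080 = 8294400 bytes, and a stride field of
 * 7680 / 16 = 480, since the descriptor stride is expressed in 16-byte units.
 * The frame buffer address itself must be aligned to XILINX_DPDMA_ALIGN_BYTES.
 */
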
/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	reg = XILINX_DPDMA_CH_CNTL_ENABLE
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}

/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}

/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}

/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;
	bool first_frame;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bits
	 * will be used, but that should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
				    & XILINX_DPDMA_CH_DESC_ID_MASK;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	first_frame = chan->first_frame;
	chan->first_frame = false;

	if (chan->video_group) {
		channels = xilinx_dpdma_chan_video_group_ready(chan);
		/*
		 * Trigger the transfer only when all channels in the group are
		 * ready.
		 */
		if (!channels)
			return;
	} else {
		channels = BIT(chan->id);
	}

	if (first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from the channel
 * status register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters of the 'no outstanding transaction' event, so that they can
 * stop the channel safely. This function is supposed to be called when the
 * 'no outstanding' interrupt is generated. The 'no outstanding' interrupt is
 * disabled and should be re-enabled when this event is handled. If the channel
 * status register still shows some number of outstanding transactions, the
 * interrupt remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev,
			"chan%u: %d outstanding transactions\n",
			chan->id, cnt);
		return -EWOULDBLOCK;
	}

	/* Disable 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for up to 50 ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
 * from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for the 'no outstanding transaction' interrupt for up to 50 ms. */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
		chan->id, xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in interrupt context or
 * anywhere atomicity is required. The calling thread may wait more than 50 ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	if (loop) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
		chan->id, xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}

/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transactions and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;
	int ret;

	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->running = false;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active).
 * As we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a new
 * descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dpdma_debugfs_desc_done_irq(chan);

	active = chan->desc.active;
	if (active)
		vchan_cyclic_callback(&active->vdesc);
	else
		dev_warn(chan->xdev->dev,
			 "chan%u: DONE IRQ with no active descriptor!\n",
			 chan->id);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *pending;
	struct xilinx_dpdma_sw_desc *sw_desc;
	unsigned long flags;
	u32 desc_id;

	spin_lock_irqsave(&chan->lock, flags);

	pending = chan->desc.pending;
	if (!chan->running || !pending)
		goto out;

	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
		& XILINX_DPDMA_CH_DESC_ID_MASK;

	/* If the retrigger raced with vsync, retry at the next frame. */
	sw_desc = list_first_entry(&pending->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (sw_desc->hw.desc_id != desc_id) {
		dev_dbg(chan->xdev->dev,
			"chan%u: vsync race lost (%u != %u), retrying\n",
			chan->id, sw_desc->hw.desc_id, desc_id);
		goto out;
	}

	/*
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);

out:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
	if (!chan)
		return false;

	if (chan->running &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	     (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
		return true;

	return false;
}

/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * It disables the channel that has been paused due to errors, and determines
 * whether the currently active descriptor can be rescheduled, depending on the
 * descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
		chan->id,
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
		chan->id,
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

	xilinx_dpdma_chan_disable(chan);
	chan->running = false;

	if (!chan->desc.active)
		goto out_unlock;

	active = chan->desc.active;
	chan->desc.active = NULL;

	xilinx_dpdma_chan_dump_tx_desc(chan, active);

	if (active->error)
		dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
			chan->id);

	/* Reschedule if there's no new descriptor */
	if (!chan->desc.pending &&
	    list_empty(&chan->vchan.desc_issued)) {
		active->error = true;
		list_add_tail(&active->vdesc.node,
			      &chan->vchan.desc_issued);
	} else {
		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
				  struct dma_interleaved_template *xt,
				  unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_tx_desc *desc;

	if (xt->dir != DMA_MEM_TO_DEV)
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
		return NULL;

	desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);

	return &desc->vdesc.tx;
}

/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  chan->xdev->dev,
					  sizeof(struct xilinx_dpdma_sw_desc),
					  align, 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"chan%u: failed to allocate a descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	return 0;
}
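
/*
 * For reference, a DMA client is expected to drive this interface roughly as
 * sketched below (illustrative only, error handling omitted; xt is assumed to
 * be a struct dma_interleaved_template allocated with room for one struct
 * data_chunk, and the other identifiers are placeholders):
 *
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_dma_addr;		// must be 256-byte aligned
 *	xt->frame_size = 1;
 *	xt->numf = height;			// number of lines
 *	xt->sgl[0].size = line_bytes;
 *	xt->sgl[0].icg = stride_bytes - line_bytes;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt,
 *					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * Both DMA_PREP_REPEAT and DMA_PREP_LOAD_EOT are mandatory, as enforced in
 * xilinx_dpdma_prep_interleaved_dma() above.
 */
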
/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	vchan_free_chan_resources(&chan->vchan);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	/*
	 * The destination address doesn't need to be specified as the DPDMA is
	 * hardwired to the destination (the DP controller). The transfer
	 * width, burst size and port window size are thus meaningless, they're
	 * fixed both on the DPDMA side and on the DP controller side.
	 */

	spin_lock_irqsave(&chan->lock, flags);

	/*
	 * Abuse the slave_id to indicate that the channel is part of a video
	 * group.
	 */
	if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
		chan->video_group = config->slave_id != 0;

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

	return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

	return 0;
}

/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete. Waiting
 * for completion is performed by xilinx_dpdma_synchronize() that will disable
 * the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors are
 * not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	LIST_HEAD(descriptors);
	unsigned long flags;
	unsigned int i;

	/* Pause the channel (including the whole video group if applicable). */
	if (chan->video_group) {
		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->running) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
			}
		}
	} else {
		xilinx_dpdma_chan_pause(chan);
	}

	/* Gather all the descriptors we can free and free them. */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	vchan_get_all_descriptors(&chan->vchan, &descriptors);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &descriptors);

	return 0;
}

/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	xilinx_dpdma_chan_stop(chan);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc.pending) {
		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
		chan->desc.pending = NULL;
	}
	if (chan->desc.active) {
		vchan_terminate_vdesc(&chan->desc.active->vdesc);
		chan->desc.active = NULL;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_synchronize(&chan->vchan);
}

/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle any error reported in @isr and @eisr. This function disables the
 * corresponding error interrupts, and those should be re-enabled once handling
 * is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @t: pointer to the tasklet associated with this handler
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * re-enable channel error interrupts, and restart the channel if needed.
 */
static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
	struct xilinx_dpdma_device *xdev = data;
	unsigned long mask;
	unsigned int i;
	u32 status;
	u32 error;

	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
		return IRQ_NONE;

	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

	if (status & XILINX_DPDMA_INTR_VSYNC) {
		/*
		 * There's a single VSYNC interrupt that needs to be processed
		 * by each running channel to update the active descriptor.
		 */
		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
			struct xilinx_dpdma_chan *chan = xdev->chan[i];

			if (chan)
				xilinx_dpdma_chan_vsync_irq(chan);
		}
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	}

	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (mask || error)
		xilinx_dpdma_handle_err_irq(xdev, mask, error);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Initialization & Cleanup
 */

static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
				  unsigned int chan_id)
{
	struct xilinx_dpdma_chan *chan;

	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->id = chan_id;
	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
		  + XILINX_DPDMA_CH_OFFSET * chan->id;
	chan->running = false;
	chan->xdev = xdev;

	spin_lock_init(&chan->lock);
	init_waitqueue_head(&chan->wait_to_stop);

	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);

	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
	vchan_init(&chan->vchan, &xdev->common);

	xdev->chan[chan->id] = chan;

	return 0;
}

static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
{
	if (!chan)
		return;

	tasklet_kill(&chan->err_task);
	list_del(&chan->vchan.chan.device_node);
}

static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
	u32 chan_id = dma_spec->args[0];

	if (chan_id >= ARRAY_SIZE(xdev->chan))
		return NULL;

	if (!xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}

static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
{
	unsigned int i;
	void __iomem *reg;

	/* Disable all interrupts */
	xilinx_dpdma_disable_irq(xdev);

	/* Stop all channels */
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
		reg = xdev->reg + XILINX_DPDMA_CH_BASE
		    + XILINX_DPDMA_CH_OFFSET * i;
		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
	}

	/* Clear the interrupt status registers */
	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
}
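
/*
 * Channels are referenced from the device tree through of_dma_xilinx_xlate(),
 * which interprets the single DMA specifier cell as the channel index. A
 * consumer node would therefore look roughly like the sketch below, where the
 * controller label and the "vid0" name are placeholders and the channel index
 * comes from <dt-bindings/dma/xlnx-zynqmp-dpdma.h>:
 *
 *	client {
 *		...
 *		dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>;
 *		dma-names = "vid0";
 *	};
 */
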
static int xilinx_dpdma_probe(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev;
	struct dma_device *ddev;
	unsigned int i;
	int ret;

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	xdev->ext_addr = sizeof(dma_addr_t) > 4;

	INIT_LIST_HEAD(&xdev->common.channels);

	platform_set_drvdata(pdev, xdev);

	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);

	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);

	dpdma_hw_init(xdev);

	xdev->irq = platform_get_irq(pdev, 0);
	if (xdev->irq < 0) {
		dev_err(xdev->dev, "failed to get platform irq\n");
		return xdev->irq;
	}

	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
			  dev_name(xdev->dev), xdev);
	if (ret) {
		dev_err(xdev->dev, "failed to request IRQ\n");
		return ret;
	}

	ddev = &xdev->common;
	ddev->dev = &pdev->dev;

	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	/* TODO: Can we achieve better granularity ? */
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
	ddev->device_config = xilinx_dpdma_config;
	ddev->device_pause = xilinx_dpdma_pause;
	ddev->device_resume = xilinx_dpdma_resume;
	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
	ddev->device_synchronize = xilinx_dpdma_synchronize;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
	ddev->directions = BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
		ret = xilinx_dpdma_chan_init(xdev, i);
		if (ret < 0) {
			dev_err(xdev->dev, "failed to initialize channel %u\n",
				i);
			goto error;
		}
	}

	ret = clk_prepare_enable(xdev->axi_clk);
	if (ret) {
		dev_err(xdev->dev, "failed to enable the axi clock\n");
		goto error;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error_dma_async;
	}

	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	xilinx_dpdma_enable_irq(xdev);

	xilinx_dpdma_debugfs_init(xdev);

	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);
error_dma_async:
	clk_disable_unprepare(xdev->axi_clk);
error:
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	free_irq(xdev->irq, xdev);

	return ret;
}

static int xilinx_dpdma_remove(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
	unsigned int i;

	/* Start by disabling the IRQ to avoid races during cleanup. */
	free_irq(xdev->irq, xdev);

	xilinx_dpdma_disable_irq(xdev);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpdma",},
	{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
	.probe			= xilinx_dpdma_probe,
	.remove			= xilinx_dpdma_remove,
	.driver			= {
		.name		= "xilinx-zynqmp-dpdma",
		.of_match_table	= xilinx_dpdma_of_match,
	},
};

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
MODULE_LICENSE("GPL v2");