1 // SPDX-License-Identifier: GPL-2.0+ 2 // 3 // Actions Semi Owl SoCs DMA driver 4 // 5 // Copyright (c) 2014 Actions Semi Inc. 6 // Author: David Liu <liuwei@actions-semi.com> 7 // 8 // Copyright (c) 2018 Linaro Ltd. 9 // Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> 10 11 #include <linux/bitops.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/dmapool.h> 17 #include <linux/err.h> 18 #include <linux/init.h> 19 #include <linux/interrupt.h> 20 #include <linux/io.h> 21 #include <linux/mm.h> 22 #include <linux/module.h> 23 #include <linux/of_device.h> 24 #include <linux/of_dma.h> 25 #include <linux/slab.h> 26 #include "virt-dma.h" 27 28 #define OWL_DMA_FRAME_MAX_LENGTH 0xfffff 29 30 /* Global DMA Controller Registers */ 31 #define OWL_DMA_IRQ_PD0 0x00 32 #define OWL_DMA_IRQ_PD1 0x04 33 #define OWL_DMA_IRQ_PD2 0x08 34 #define OWL_DMA_IRQ_PD3 0x0C 35 #define OWL_DMA_IRQ_EN0 0x10 36 #define OWL_DMA_IRQ_EN1 0x14 37 #define OWL_DMA_IRQ_EN2 0x18 38 #define OWL_DMA_IRQ_EN3 0x1C 39 #define OWL_DMA_SECURE_ACCESS_CTL 0x20 40 #define OWL_DMA_NIC_QOS 0x24 41 #define OWL_DMA_DBGSEL 0x28 42 #define OWL_DMA_IDLE_STAT 0x2C 43 44 /* Channel Registers */ 45 #define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100) 46 #define OWL_DMAX_MODE 0x00 47 #define OWL_DMAX_SOURCE 0x04 48 #define OWL_DMAX_DESTINATION 0x08 49 #define OWL_DMAX_FRAME_LEN 0x0C 50 #define OWL_DMAX_FRAME_CNT 0x10 51 #define OWL_DMAX_REMAIN_FRAME_CNT 0x14 52 #define OWL_DMAX_REMAIN_CNT 0x18 53 #define OWL_DMAX_SOURCE_STRIDE 0x1C 54 #define OWL_DMAX_DESTINATION_STRIDE 0x20 55 #define OWL_DMAX_START 0x24 56 #define OWL_DMAX_PAUSE 0x28 57 #define OWL_DMAX_CHAINED_CTL 0x2C 58 #define OWL_DMAX_CONSTANT 0x30 59 #define OWL_DMAX_LINKLIST_CTL 0x34 60 #define OWL_DMAX_NEXT_DESCRIPTOR 0x38 61 #define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 0x3C 62 #define OWL_DMAX_INT_CTL 0x40 63 #define OWL_DMAX_INT_STATUS 0x44 64 #define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48 65 #define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C 66 67 /* OWL_DMAX_MODE Bits */ 68 #define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0) 69 #define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8) 70 #define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0) 71 #define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2) 72 #define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3) 73 #define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10) 74 #define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0) 75 #define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2) 76 #define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3) 77 #define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16) 78 #define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0) 79 #define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1) 80 #define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2) 81 #define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18) 82 #define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0) 83 #define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1) 84 #define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2) 85 #define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20) 86 #define OWL_DMA_MODE_CB BIT(23) 87 #define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28) 88 #define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0) 89 #define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1) 90 #define OWL_DMA_MODE_CFE BIT(29) 91 #define OWL_DMA_MODE_LME BIT(30) 92 #define OWL_DMA_MODE_CME BIT(31) 93 94 /* OWL_DMAX_LINKLIST_CTL Bits */ 95 #define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8) 96 #define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0) 97 #define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1) 98 #define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2) 99 #define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10) 100 #define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0) 101 #define OWL_DMA_LLC_DAV_LOAD_NEXT OWL_DMA_LLC_DAV(1) 102 #define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2) 103 #define OWL_DMA_LLC_SUSPEND BIT(16) 104 105 /* OWL_DMAX_INT_CTL Bits */ 106 #define OWL_DMA_INTCTL_BLOCK BIT(0) 107 #define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1) 108 #define OWL_DMA_INTCTL_FRAME BIT(2) 109 #define OWL_DMA_INTCTL_HALF_FRAME BIT(3) 110 #define OWL_DMA_INTCTL_LAST_FRAME BIT(4) 111 112 /* OWL_DMAX_INT_STATUS Bits */ 113 #define OWL_DMA_INTSTAT_BLOCK BIT(0) 114 #define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1) 115 #define OWL_DMA_INTSTAT_FRAME BIT(2) 116 #define OWL_DMA_INTSTAT_HALF_FRAME BIT(3) 117 #define OWL_DMA_INTSTAT_LAST_FRAME BIT(4) 118 119 /* Pack shift and newshift in a single word */ 120 #define BIT_FIELD(val, width, shift, newshift) \ 121 ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift)) 122 123 /* Frame count value is fixed as 1 */ 124 #define FCNT_VAL 0x1 125 126 /** 127 * owl_dmadesc_offsets - Describe DMA descriptor, hardware link 128 * list for dma transfer 129 * @OWL_DMADESC_NEXT_LLI: physical address of the next link list 130 * @OWL_DMADESC_SADDR: source physical address 131 * @OWL_DMADESC_DADDR: destination physical address 132 * @OWL_DMADESC_FLEN: frame length 133 * @OWL_DMADESC_SRC_STRIDE: source stride 134 * @OWL_DMADESC_DST_STRIDE: destination stride 135 * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config 136 * @OWL_DMADESC_CTRLB: interrupt config 137 * @OWL_DMADESC_CONST_NUM: data for constant fill 138 */ 139 enum owl_dmadesc_offsets { 140 OWL_DMADESC_NEXT_LLI = 0, 141 OWL_DMADESC_SADDR, 142 OWL_DMADESC_DADDR, 143 OWL_DMADESC_FLEN, 144 OWL_DMADESC_SRC_STRIDE, 145 OWL_DMADESC_DST_STRIDE, 146 OWL_DMADESC_CTRLA, 147 OWL_DMADESC_CTRLB, 148 OWL_DMADESC_CONST_NUM, 149 OWL_DMADESC_SIZE 150 }; 151 152 enum owl_dma_id { 153 S900_DMA, 154 S700_DMA, 155 }; 156 157 /** 158 * struct owl_dma_lli - Link list for dma transfer 159 * @hw: hardware link list 160 * @phys: physical address of hardware link list 161 * @node: node for txd's lli_list 162 */ 163 struct owl_dma_lli { 164 u32 hw[OWL_DMADESC_SIZE]; 165 dma_addr_t phys; 166 struct list_head node; 167 }; 168 169 /** 170 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor 171 * @vd: virtual DMA descriptor 172 * @lli_list: link list of lli nodes 173 * @cyclic: flag to indicate cyclic transfers 174 */ 175 struct owl_dma_txd { 176 struct virt_dma_desc vd; 177 struct list_head lli_list; 178 bool cyclic; 179 }; 180 181 /** 182 * struct owl_dma_pchan - Holder for the physical channels 183 * @id: physical index to this channel 184 * @base: virtual memory base for the dma channel 185 * @vchan: the virtual channel currently being served by this physical channel 186 */ 187 struct owl_dma_pchan { 188 u32 id; 189 void __iomem *base; 190 struct owl_dma_vchan *vchan; 191 }; 192 193 /** 194 * struct owl_dma_pchan - Wrapper for DMA ENGINE channel 195 * @vc: wrappped virtual channel 196 * @pchan: the physical channel utilized by this channel 197 * @txd: active transaction on this channel 198 * @cfg: slave configuration for this channel 199 * @drq: physical DMA request ID for this channel 200 */ 201 struct owl_dma_vchan { 202 struct virt_dma_chan vc; 203 struct owl_dma_pchan *pchan; 204 struct owl_dma_txd *txd; 205 struct dma_slave_config cfg; 206 u8 drq; 207 }; 208 209 /** 210 * struct owl_dma - Holder for the Owl DMA controller 211 * @dma: dma engine for this instance 212 * @base: virtual memory base for the DMA controller 213 * @clk: clock for the DMA controller 214 * @lock: a lock to use when change DMA controller global register 215 * @lli_pool: a pool for the LLI descriptors 216 * @irq: interrupt ID for the DMA controller 217 * @nr_pchans: the number of physical channels 218 * @pchans: array of data for the physical channels 219 * @nr_vchans: the number of physical channels 220 * @vchans: array of data for the physical channels 221 * @devid: device id based on OWL SoC 222 */ 223 struct owl_dma { 224 struct dma_device dma; 225 void __iomem *base; 226 struct clk *clk; 227 spinlock_t lock; 228 struct dma_pool *lli_pool; 229 int irq; 230 231 unsigned int nr_pchans; 232 struct owl_dma_pchan *pchans; 233 234 unsigned int nr_vchans; 235 struct owl_dma_vchan *vchans; 236 enum owl_dma_id devid; 237 }; 238 239 static void pchan_update(struct owl_dma_pchan *pchan, u32 reg, 240 u32 val, bool state) 241 { 242 u32 regval; 243 244 regval = readl(pchan->base + reg); 245 246 if (state) 247 regval |= val; 248 else 249 regval &= ~val; 250 251 writel(val, pchan->base + reg); 252 } 253 254 static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data) 255 { 256 writel(data, pchan->base + reg); 257 } 258 259 static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg) 260 { 261 return readl(pchan->base + reg); 262 } 263 264 static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state) 265 { 266 u32 regval; 267 268 regval = readl(od->base + reg); 269 270 if (state) 271 regval |= val; 272 else 273 regval &= ~val; 274 275 writel(val, od->base + reg); 276 } 277 278 static void dma_writel(struct owl_dma *od, u32 reg, u32 data) 279 { 280 writel(data, od->base + reg); 281 } 282 283 static u32 dma_readl(struct owl_dma *od, u32 reg) 284 { 285 return readl(od->base + reg); 286 } 287 288 static inline struct owl_dma *to_owl_dma(struct dma_device *dd) 289 { 290 return container_of(dd, struct owl_dma, dma); 291 } 292 293 static struct device *chan2dev(struct dma_chan *chan) 294 { 295 return &chan->dev->device; 296 } 297 298 static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan) 299 { 300 return container_of(chan, struct owl_dma_vchan, vc.chan); 301 } 302 303 static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx) 304 { 305 return container_of(tx, struct owl_dma_txd, vd.tx); 306 } 307 308 static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl) 309 { 310 u32 ctl; 311 312 ctl = BIT_FIELD(mode, 4, 28, 28) | 313 BIT_FIELD(mode, 8, 16, 20) | 314 BIT_FIELD(mode, 4, 8, 16) | 315 BIT_FIELD(mode, 6, 0, 10) | 316 BIT_FIELD(llc_ctl, 2, 10, 8) | 317 BIT_FIELD(llc_ctl, 2, 8, 6); 318 319 return ctl; 320 } 321 322 static inline u32 llc_hw_ctrlb(u32 int_ctl) 323 { 324 u32 ctl; 325 326 /* 327 * Irrespective of the SoC, ctrlb value starts filling from 328 * bit 18. 329 */ 330 ctl = BIT_FIELD(int_ctl, 7, 0, 18); 331 332 return ctl; 333 } 334 335 static u32 llc_hw_flen(struct owl_dma_lli *lli) 336 { 337 return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0); 338 } 339 340 static void owl_dma_free_lli(struct owl_dma *od, 341 struct owl_dma_lli *lli) 342 { 343 list_del(&lli->node); 344 dma_pool_free(od->lli_pool, lli, lli->phys); 345 } 346 347 static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od) 348 { 349 struct owl_dma_lli *lli; 350 dma_addr_t phys; 351 352 lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys); 353 if (!lli) 354 return NULL; 355 356 INIT_LIST_HEAD(&lli->node); 357 lli->phys = phys; 358 359 return lli; 360 } 361 362 static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd, 363 struct owl_dma_lli *prev, 364 struct owl_dma_lli *next, 365 bool is_cyclic) 366 { 367 if (!is_cyclic) 368 list_add_tail(&next->node, &txd->lli_list); 369 370 if (prev) { 371 prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys; 372 prev->hw[OWL_DMADESC_CTRLA] |= 373 llc_hw_ctrla(OWL_DMA_MODE_LME, 0); 374 } 375 376 return next; 377 } 378 379 static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan, 380 struct owl_dma_lli *lli, 381 dma_addr_t src, dma_addr_t dst, 382 u32 len, enum dma_transfer_direction dir, 383 struct dma_slave_config *sconfig, 384 bool is_cyclic) 385 { 386 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); 387 u32 mode, ctrlb; 388 389 mode = OWL_DMA_MODE_PW(0); 390 391 switch (dir) { 392 case DMA_MEM_TO_MEM: 393 mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU | 394 OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC | 395 OWL_DMA_MODE_DAM_INC; 396 397 break; 398 case DMA_MEM_TO_DEV: 399 mode |= OWL_DMA_MODE_TS(vchan->drq) 400 | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV 401 | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST; 402 403 /* 404 * Hardware only supports 32bit and 8bit buswidth. Since the 405 * default is 32bit, select 8bit only when requested. 406 */ 407 if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) 408 mode |= OWL_DMA_MODE_NDDBW_8BIT; 409 410 break; 411 case DMA_DEV_TO_MEM: 412 mode |= OWL_DMA_MODE_TS(vchan->drq) 413 | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU 414 | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC; 415 416 /* 417 * Hardware only supports 32bit and 8bit buswidth. Since the 418 * default is 32bit, select 8bit only when requested. 419 */ 420 if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) 421 mode |= OWL_DMA_MODE_NDDBW_8BIT; 422 423 break; 424 default: 425 return -EINVAL; 426 } 427 428 lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode, 429 OWL_DMA_LLC_SAV_LOAD_NEXT | 430 OWL_DMA_LLC_DAV_LOAD_NEXT); 431 432 if (is_cyclic) 433 ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK); 434 else 435 ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK); 436 437 lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */ 438 lli->hw[OWL_DMADESC_SADDR] = src; 439 lli->hw[OWL_DMADESC_DADDR] = dst; 440 lli->hw[OWL_DMADESC_SRC_STRIDE] = 0; 441 lli->hw[OWL_DMADESC_DST_STRIDE] = 0; 442 443 if (od->devid == S700_DMA) { 444 /* Max frame length is 1MB */ 445 lli->hw[OWL_DMADESC_FLEN] = len; 446 /* 447 * On S700, word starts from offset 0x1C is shared between 448 * frame count and ctrlb, where first 12 bits are for frame 449 * count and rest of 20 bits are for ctrlb. 450 */ 451 lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb; 452 } else { 453 /* 454 * On S900, word starts from offset 0xC is shared between 455 * frame length (max frame length is 1MB) and frame count, 456 * where first 20 bits are for frame length and rest of 457 * 12 bits are for frame count. 458 */ 459 lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20; 460 lli->hw[OWL_DMADESC_CTRLB] = ctrlb; 461 } 462 463 return 0; 464 } 465 466 static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, 467 struct owl_dma_vchan *vchan) 468 { 469 struct owl_dma_pchan *pchan = NULL; 470 unsigned long flags; 471 int i; 472 473 for (i = 0; i < od->nr_pchans; i++) { 474 pchan = &od->pchans[i]; 475 476 spin_lock_irqsave(&od->lock, flags); 477 if (!pchan->vchan) { 478 pchan->vchan = vchan; 479 spin_unlock_irqrestore(&od->lock, flags); 480 break; 481 } 482 483 spin_unlock_irqrestore(&od->lock, flags); 484 } 485 486 return pchan; 487 } 488 489 static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan) 490 { 491 unsigned int val; 492 493 val = dma_readl(od, OWL_DMA_IDLE_STAT); 494 495 return !(val & (1 << pchan->id)); 496 } 497 498 static void owl_dma_terminate_pchan(struct owl_dma *od, 499 struct owl_dma_pchan *pchan) 500 { 501 unsigned long flags; 502 u32 irq_pd; 503 504 pchan_writel(pchan, OWL_DMAX_START, 0); 505 pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); 506 507 spin_lock_irqsave(&od->lock, flags); 508 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false); 509 510 irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0); 511 if (irq_pd & (1 << pchan->id)) { 512 dev_warn(od->dma.dev, 513 "terminating pchan %d that still has pending irq\n", 514 pchan->id); 515 dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id)); 516 } 517 518 pchan->vchan = NULL; 519 520 spin_unlock_irqrestore(&od->lock, flags); 521 } 522 523 static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan) 524 { 525 pchan_writel(pchan, 1, OWL_DMAX_PAUSE); 526 } 527 528 static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan) 529 { 530 pchan_writel(pchan, 0, OWL_DMAX_PAUSE); 531 } 532 533 static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan) 534 { 535 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); 536 struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc); 537 struct owl_dma_pchan *pchan = vchan->pchan; 538 struct owl_dma_txd *txd = to_owl_txd(&vd->tx); 539 struct owl_dma_lli *lli; 540 unsigned long flags; 541 u32 int_ctl; 542 543 list_del(&vd->node); 544 545 vchan->txd = txd; 546 547 /* Wait for channel inactive */ 548 while (owl_dma_pchan_busy(od, pchan)) 549 cpu_relax(); 550 551 lli = list_first_entry(&txd->lli_list, 552 struct owl_dma_lli, node); 553 554 if (txd->cyclic) 555 int_ctl = OWL_DMA_INTCTL_BLOCK; 556 else 557 int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK; 558 559 pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME); 560 pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL, 561 OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT); 562 pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys); 563 pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl); 564 565 /* Clear IRQ status for this pchan */ 566 pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); 567 568 spin_lock_irqsave(&od->lock, flags); 569 570 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true); 571 572 spin_unlock_irqrestore(&od->lock, flags); 573 574 dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id); 575 576 /* Start DMA transfer for this pchan */ 577 pchan_writel(pchan, OWL_DMAX_START, 0x1); 578 579 return 0; 580 } 581 582 static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan) 583 { 584 /* Ensure that the physical channel is stopped */ 585 owl_dma_terminate_pchan(od, vchan->pchan); 586 587 vchan->pchan = NULL; 588 } 589 590 static irqreturn_t owl_dma_interrupt(int irq, void *dev_id) 591 { 592 struct owl_dma *od = dev_id; 593 struct owl_dma_vchan *vchan; 594 struct owl_dma_pchan *pchan; 595 unsigned long pending; 596 int i; 597 unsigned int global_irq_pending, chan_irq_pending; 598 599 spin_lock(&od->lock); 600 601 pending = dma_readl(od, OWL_DMA_IRQ_PD0); 602 603 /* Clear IRQ status for each pchan */ 604 for_each_set_bit(i, &pending, od->nr_pchans) { 605 pchan = &od->pchans[i]; 606 pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); 607 } 608 609 /* Clear pending IRQ */ 610 dma_writel(od, OWL_DMA_IRQ_PD0, pending); 611 612 /* Check missed pending IRQ */ 613 for (i = 0; i < od->nr_pchans; i++) { 614 pchan = &od->pchans[i]; 615 chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) & 616 pchan_readl(pchan, OWL_DMAX_INT_STATUS); 617 618 /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */ 619 dma_readl(od, OWL_DMA_IRQ_PD0); 620 621 global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0); 622 623 if (chan_irq_pending && !(global_irq_pending & BIT(i))) { 624 dev_dbg(od->dma.dev, 625 "global and channel IRQ pending match err\n"); 626 627 /* Clear IRQ status for this pchan */ 628 pchan_update(pchan, OWL_DMAX_INT_STATUS, 629 0xff, false); 630 631 /* Update global IRQ pending */ 632 pending |= BIT(i); 633 } 634 } 635 636 spin_unlock(&od->lock); 637 638 for_each_set_bit(i, &pending, od->nr_pchans) { 639 struct owl_dma_txd *txd; 640 641 pchan = &od->pchans[i]; 642 643 vchan = pchan->vchan; 644 if (!vchan) { 645 dev_warn(od->dma.dev, "no vchan attached on pchan %d\n", 646 pchan->id); 647 continue; 648 } 649 650 spin_lock(&vchan->vc.lock); 651 652 txd = vchan->txd; 653 if (txd) { 654 vchan->txd = NULL; 655 656 vchan_cookie_complete(&txd->vd); 657 658 /* 659 * Start the next descriptor (if any), 660 * otherwise free this channel. 661 */ 662 if (vchan_next_desc(&vchan->vc)) 663 owl_dma_start_next_txd(vchan); 664 else 665 owl_dma_phy_free(od, vchan); 666 } 667 668 spin_unlock(&vchan->vc.lock); 669 } 670 671 return IRQ_HANDLED; 672 } 673 674 static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd) 675 { 676 struct owl_dma_lli *lli, *_lli; 677 678 if (unlikely(!txd)) 679 return; 680 681 list_for_each_entry_safe(lli, _lli, &txd->lli_list, node) 682 owl_dma_free_lli(od, lli); 683 684 kfree(txd); 685 } 686 687 static void owl_dma_desc_free(struct virt_dma_desc *vd) 688 { 689 struct owl_dma *od = to_owl_dma(vd->tx.chan->device); 690 struct owl_dma_txd *txd = to_owl_txd(&vd->tx); 691 692 owl_dma_free_txd(od, txd); 693 } 694 695 static int owl_dma_terminate_all(struct dma_chan *chan) 696 { 697 struct owl_dma *od = to_owl_dma(chan->device); 698 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 699 unsigned long flags; 700 LIST_HEAD(head); 701 702 spin_lock_irqsave(&vchan->vc.lock, flags); 703 704 if (vchan->pchan) 705 owl_dma_phy_free(od, vchan); 706 707 if (vchan->txd) { 708 owl_dma_desc_free(&vchan->txd->vd); 709 vchan->txd = NULL; 710 } 711 712 vchan_get_all_descriptors(&vchan->vc, &head); 713 714 spin_unlock_irqrestore(&vchan->vc.lock, flags); 715 716 vchan_dma_desc_free_list(&vchan->vc, &head); 717 718 return 0; 719 } 720 721 static int owl_dma_config(struct dma_chan *chan, 722 struct dma_slave_config *config) 723 { 724 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 725 726 /* Reject definitely invalid configurations */ 727 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 728 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 729 return -EINVAL; 730 731 memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config)); 732 733 return 0; 734 } 735 736 static int owl_dma_pause(struct dma_chan *chan) 737 { 738 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 739 unsigned long flags; 740 741 spin_lock_irqsave(&vchan->vc.lock, flags); 742 743 owl_dma_pause_pchan(vchan->pchan); 744 745 spin_unlock_irqrestore(&vchan->vc.lock, flags); 746 747 return 0; 748 } 749 750 static int owl_dma_resume(struct dma_chan *chan) 751 { 752 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 753 unsigned long flags; 754 755 if (!vchan->pchan && !vchan->txd) 756 return 0; 757 758 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); 759 760 spin_lock_irqsave(&vchan->vc.lock, flags); 761 762 owl_dma_resume_pchan(vchan->pchan); 763 764 spin_unlock_irqrestore(&vchan->vc.lock, flags); 765 766 return 0; 767 } 768 769 static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan) 770 { 771 struct owl_dma_pchan *pchan; 772 struct owl_dma_txd *txd; 773 struct owl_dma_lli *lli; 774 unsigned int next_lli_phy; 775 size_t bytes; 776 777 pchan = vchan->pchan; 778 txd = vchan->txd; 779 780 if (!pchan || !txd) 781 return 0; 782 783 /* Get remain count of current node in link list */ 784 bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT); 785 786 /* Loop through the preceding nodes to get total remaining bytes */ 787 if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) { 788 next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR); 789 list_for_each_entry(lli, &txd->lli_list, node) { 790 /* Start from the next active node */ 791 if (lli->phys == next_lli_phy) { 792 list_for_each_entry(lli, &txd->lli_list, node) 793 bytes += llc_hw_flen(lli); 794 break; 795 } 796 } 797 } 798 799 return bytes; 800 } 801 802 static enum dma_status owl_dma_tx_status(struct dma_chan *chan, 803 dma_cookie_t cookie, 804 struct dma_tx_state *state) 805 { 806 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 807 struct owl_dma_lli *lli; 808 struct virt_dma_desc *vd; 809 struct owl_dma_txd *txd; 810 enum dma_status ret; 811 unsigned long flags; 812 size_t bytes = 0; 813 814 ret = dma_cookie_status(chan, cookie, state); 815 if (ret == DMA_COMPLETE || !state) 816 return ret; 817 818 spin_lock_irqsave(&vchan->vc.lock, flags); 819 820 vd = vchan_find_desc(&vchan->vc, cookie); 821 if (vd) { 822 txd = to_owl_txd(&vd->tx); 823 list_for_each_entry(lli, &txd->lli_list, node) 824 bytes += llc_hw_flen(lli); 825 } else { 826 bytes = owl_dma_getbytes_chan(vchan); 827 } 828 829 spin_unlock_irqrestore(&vchan->vc.lock, flags); 830 831 dma_set_residue(state, bytes); 832 833 return ret; 834 } 835 836 static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan) 837 { 838 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); 839 struct owl_dma_pchan *pchan; 840 841 pchan = owl_dma_get_pchan(od, vchan); 842 if (!pchan) 843 return; 844 845 dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id); 846 847 vchan->pchan = pchan; 848 owl_dma_start_next_txd(vchan); 849 } 850 851 static void owl_dma_issue_pending(struct dma_chan *chan) 852 { 853 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 854 unsigned long flags; 855 856 spin_lock_irqsave(&vchan->vc.lock, flags); 857 if (vchan_issue_pending(&vchan->vc)) { 858 if (!vchan->pchan) 859 owl_dma_phy_alloc_and_start(vchan); 860 } 861 spin_unlock_irqrestore(&vchan->vc.lock, flags); 862 } 863 864 static struct dma_async_tx_descriptor 865 *owl_dma_prep_memcpy(struct dma_chan *chan, 866 dma_addr_t dst, dma_addr_t src, 867 size_t len, unsigned long flags) 868 { 869 struct owl_dma *od = to_owl_dma(chan->device); 870 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 871 struct owl_dma_txd *txd; 872 struct owl_dma_lli *lli, *prev = NULL; 873 size_t offset, bytes; 874 int ret; 875 876 if (!len) 877 return NULL; 878 879 txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 880 if (!txd) 881 return NULL; 882 883 INIT_LIST_HEAD(&txd->lli_list); 884 885 /* Process the transfer as frame by frame */ 886 for (offset = 0; offset < len; offset += bytes) { 887 lli = owl_dma_alloc_lli(od); 888 if (!lli) { 889 dev_warn(chan2dev(chan), "failed to allocate lli\n"); 890 goto err_txd_free; 891 } 892 893 bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH); 894 895 ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset, 896 bytes, DMA_MEM_TO_MEM, 897 &vchan->cfg, txd->cyclic); 898 if (ret) { 899 dev_warn(chan2dev(chan), "failed to config lli\n"); 900 goto err_txd_free; 901 } 902 903 prev = owl_dma_add_lli(txd, prev, lli, false); 904 } 905 906 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 907 908 err_txd_free: 909 owl_dma_free_txd(od, txd); 910 return NULL; 911 } 912 913 static struct dma_async_tx_descriptor 914 *owl_dma_prep_slave_sg(struct dma_chan *chan, 915 struct scatterlist *sgl, 916 unsigned int sg_len, 917 enum dma_transfer_direction dir, 918 unsigned long flags, void *context) 919 { 920 struct owl_dma *od = to_owl_dma(chan->device); 921 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 922 struct dma_slave_config *sconfig = &vchan->cfg; 923 struct owl_dma_txd *txd; 924 struct owl_dma_lli *lli, *prev = NULL; 925 struct scatterlist *sg; 926 dma_addr_t addr, src = 0, dst = 0; 927 size_t len; 928 int ret, i; 929 930 txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 931 if (!txd) 932 return NULL; 933 934 INIT_LIST_HEAD(&txd->lli_list); 935 936 for_each_sg(sgl, sg, sg_len, i) { 937 addr = sg_dma_address(sg); 938 len = sg_dma_len(sg); 939 940 if (len > OWL_DMA_FRAME_MAX_LENGTH) { 941 dev_err(od->dma.dev, 942 "frame length exceeds max supported length"); 943 goto err_txd_free; 944 } 945 946 lli = owl_dma_alloc_lli(od); 947 if (!lli) { 948 dev_err(chan2dev(chan), "failed to allocate lli"); 949 goto err_txd_free; 950 } 951 952 if (dir == DMA_MEM_TO_DEV) { 953 src = addr; 954 dst = sconfig->dst_addr; 955 } else { 956 src = sconfig->src_addr; 957 dst = addr; 958 } 959 960 ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig, 961 txd->cyclic); 962 if (ret) { 963 dev_warn(chan2dev(chan), "failed to config lli"); 964 goto err_txd_free; 965 } 966 967 prev = owl_dma_add_lli(txd, prev, lli, false); 968 } 969 970 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 971 972 err_txd_free: 973 owl_dma_free_txd(od, txd); 974 975 return NULL; 976 } 977 978 static struct dma_async_tx_descriptor 979 *owl_prep_dma_cyclic(struct dma_chan *chan, 980 dma_addr_t buf_addr, size_t buf_len, 981 size_t period_len, 982 enum dma_transfer_direction dir, 983 unsigned long flags) 984 { 985 struct owl_dma *od = to_owl_dma(chan->device); 986 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 987 struct dma_slave_config *sconfig = &vchan->cfg; 988 struct owl_dma_txd *txd; 989 struct owl_dma_lli *lli, *prev = NULL, *first = NULL; 990 dma_addr_t src = 0, dst = 0; 991 unsigned int periods = buf_len / period_len; 992 int ret, i; 993 994 txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 995 if (!txd) 996 return NULL; 997 998 INIT_LIST_HEAD(&txd->lli_list); 999 txd->cyclic = true; 1000 1001 for (i = 0; i < periods; i++) { 1002 lli = owl_dma_alloc_lli(od); 1003 if (!lli) { 1004 dev_warn(chan2dev(chan), "failed to allocate lli"); 1005 goto err_txd_free; 1006 } 1007 1008 if (dir == DMA_MEM_TO_DEV) { 1009 src = buf_addr + (period_len * i); 1010 dst = sconfig->dst_addr; 1011 } else if (dir == DMA_DEV_TO_MEM) { 1012 src = sconfig->src_addr; 1013 dst = buf_addr + (period_len * i); 1014 } 1015 1016 ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len, 1017 dir, sconfig, txd->cyclic); 1018 if (ret) { 1019 dev_warn(chan2dev(chan), "failed to config lli"); 1020 goto err_txd_free; 1021 } 1022 1023 if (!first) 1024 first = lli; 1025 1026 prev = owl_dma_add_lli(txd, prev, lli, false); 1027 } 1028 1029 /* close the cyclic list */ 1030 owl_dma_add_lli(txd, prev, first, true); 1031 1032 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 1033 1034 err_txd_free: 1035 owl_dma_free_txd(od, txd); 1036 1037 return NULL; 1038 } 1039 1040 static void owl_dma_free_chan_resources(struct dma_chan *chan) 1041 { 1042 struct owl_dma_vchan *vchan = to_owl_vchan(chan); 1043 1044 /* Ensure all queued descriptors are freed */ 1045 vchan_free_chan_resources(&vchan->vc); 1046 } 1047 1048 static inline void owl_dma_free(struct owl_dma *od) 1049 { 1050 struct owl_dma_vchan *vchan = NULL; 1051 struct owl_dma_vchan *next; 1052 1053 list_for_each_entry_safe(vchan, 1054 next, &od->dma.channels, vc.chan.device_node) { 1055 list_del(&vchan->vc.chan.device_node); 1056 tasklet_kill(&vchan->vc.task); 1057 } 1058 } 1059 1060 static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec, 1061 struct of_dma *ofdma) 1062 { 1063 struct owl_dma *od = ofdma->of_dma_data; 1064 struct owl_dma_vchan *vchan; 1065 struct dma_chan *chan; 1066 u8 drq = dma_spec->args[0]; 1067 1068 if (drq > od->nr_vchans) 1069 return NULL; 1070 1071 chan = dma_get_any_slave_channel(&od->dma); 1072 if (!chan) 1073 return NULL; 1074 1075 vchan = to_owl_vchan(chan); 1076 vchan->drq = drq; 1077 1078 return chan; 1079 } 1080 1081 static const struct of_device_id owl_dma_match[] = { 1082 { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,}, 1083 { .compatible = "actions,s700-dma", .data = (void *)S700_DMA,}, 1084 { /* sentinel */ }, 1085 }; 1086 MODULE_DEVICE_TABLE(of, owl_dma_match); 1087 1088 static int owl_dma_probe(struct platform_device *pdev) 1089 { 1090 struct device_node *np = pdev->dev.of_node; 1091 struct owl_dma *od; 1092 int ret, i, nr_channels, nr_requests; 1093 1094 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); 1095 if (!od) 1096 return -ENOMEM; 1097 1098 od->base = devm_platform_ioremap_resource(pdev, 0); 1099 if (IS_ERR(od->base)) 1100 return PTR_ERR(od->base); 1101 1102 ret = of_property_read_u32(np, "dma-channels", &nr_channels); 1103 if (ret) { 1104 dev_err(&pdev->dev, "can't get dma-channels\n"); 1105 return ret; 1106 } 1107 1108 ret = of_property_read_u32(np, "dma-requests", &nr_requests); 1109 if (ret) { 1110 dev_err(&pdev->dev, "can't get dma-requests\n"); 1111 return ret; 1112 } 1113 1114 dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n", 1115 nr_channels, nr_requests); 1116 1117 od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev); 1118 1119 od->nr_pchans = nr_channels; 1120 od->nr_vchans = nr_requests; 1121 1122 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 1123 1124 platform_set_drvdata(pdev, od); 1125 spin_lock_init(&od->lock); 1126 1127 dma_cap_set(DMA_MEMCPY, od->dma.cap_mask); 1128 dma_cap_set(DMA_SLAVE, od->dma.cap_mask); 1129 dma_cap_set(DMA_CYCLIC, od->dma.cap_mask); 1130 1131 od->dma.dev = &pdev->dev; 1132 od->dma.device_free_chan_resources = owl_dma_free_chan_resources; 1133 od->dma.device_tx_status = owl_dma_tx_status; 1134 od->dma.device_issue_pending = owl_dma_issue_pending; 1135 od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy; 1136 od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg; 1137 od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic; 1138 od->dma.device_config = owl_dma_config; 1139 od->dma.device_pause = owl_dma_pause; 1140 od->dma.device_resume = owl_dma_resume; 1141 od->dma.device_terminate_all = owl_dma_terminate_all; 1142 od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 1143 od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 1144 od->dma.directions = BIT(DMA_MEM_TO_MEM); 1145 od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1146 1147 INIT_LIST_HEAD(&od->dma.channels); 1148 1149 od->clk = devm_clk_get(&pdev->dev, NULL); 1150 if (IS_ERR(od->clk)) { 1151 dev_err(&pdev->dev, "unable to get clock\n"); 1152 return PTR_ERR(od->clk); 1153 } 1154 1155 /* 1156 * Eventhough the DMA controller is capable of generating 4 1157 * IRQ's for DMA priority feature, we only use 1 IRQ for 1158 * simplification. 1159 */ 1160 od->irq = platform_get_irq(pdev, 0); 1161 ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0, 1162 dev_name(&pdev->dev), od); 1163 if (ret) { 1164 dev_err(&pdev->dev, "unable to request IRQ\n"); 1165 return ret; 1166 } 1167 1168 /* Init physical channel */ 1169 od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans, 1170 sizeof(struct owl_dma_pchan), GFP_KERNEL); 1171 if (!od->pchans) 1172 return -ENOMEM; 1173 1174 for (i = 0; i < od->nr_pchans; i++) { 1175 struct owl_dma_pchan *pchan = &od->pchans[i]; 1176 1177 pchan->id = i; 1178 pchan->base = od->base + OWL_DMA_CHAN_BASE(i); 1179 } 1180 1181 /* Init virtual channel */ 1182 od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans, 1183 sizeof(struct owl_dma_vchan), GFP_KERNEL); 1184 if (!od->vchans) 1185 return -ENOMEM; 1186 1187 for (i = 0; i < od->nr_vchans; i++) { 1188 struct owl_dma_vchan *vchan = &od->vchans[i]; 1189 1190 vchan->vc.desc_free = owl_dma_desc_free; 1191 vchan_init(&vchan->vc, &od->dma); 1192 } 1193 1194 /* Create a pool of consistent memory blocks for hardware descriptors */ 1195 od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev, 1196 sizeof(struct owl_dma_lli), 1197 __alignof__(struct owl_dma_lli), 1198 0); 1199 if (!od->lli_pool) { 1200 dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n"); 1201 return -ENOMEM; 1202 } 1203 1204 clk_prepare_enable(od->clk); 1205 1206 ret = dma_async_device_register(&od->dma); 1207 if (ret) { 1208 dev_err(&pdev->dev, "failed to register DMA engine device\n"); 1209 goto err_pool_free; 1210 } 1211 1212 /* Device-tree DMA controller registration */ 1213 ret = of_dma_controller_register(pdev->dev.of_node, 1214 owl_dma_of_xlate, od); 1215 if (ret) { 1216 dev_err(&pdev->dev, "of_dma_controller_register failed\n"); 1217 goto err_dma_unregister; 1218 } 1219 1220 return 0; 1221 1222 err_dma_unregister: 1223 dma_async_device_unregister(&od->dma); 1224 err_pool_free: 1225 clk_disable_unprepare(od->clk); 1226 dma_pool_destroy(od->lli_pool); 1227 1228 return ret; 1229 } 1230 1231 static int owl_dma_remove(struct platform_device *pdev) 1232 { 1233 struct owl_dma *od = platform_get_drvdata(pdev); 1234 1235 of_dma_controller_free(pdev->dev.of_node); 1236 dma_async_device_unregister(&od->dma); 1237 1238 /* Mask all interrupts for this execution environment */ 1239 dma_writel(od, OWL_DMA_IRQ_EN0, 0x0); 1240 1241 /* Make sure we won't have any further interrupts */ 1242 devm_free_irq(od->dma.dev, od->irq, od); 1243 1244 owl_dma_free(od); 1245 1246 clk_disable_unprepare(od->clk); 1247 1248 return 0; 1249 } 1250 1251 static struct platform_driver owl_dma_driver = { 1252 .probe = owl_dma_probe, 1253 .remove = owl_dma_remove, 1254 .driver = { 1255 .name = "dma-owl", 1256 .of_match_table = of_match_ptr(owl_dma_match), 1257 }, 1258 }; 1259 1260 static int owl_dma_init(void) 1261 { 1262 return platform_driver_register(&owl_dma_driver); 1263 } 1264 subsys_initcall(owl_dma_init); 1265 1266 static void __exit owl_dma_exit(void) 1267 { 1268 platform_driver_unregister(&owl_dma_driver); 1269 } 1270 module_exit(owl_dma_exit); 1271 1272 MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>"); 1273 MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); 1274 MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver"); 1275 MODULE_LICENSE("GPL"); 1276