/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation refers to the
 * PIO mode of the mxs apbh-dma and apbx-dma controllers. In this mode, the
 * DMA engine can program the registers of peripheral devices.
 */

#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
/*
 * The offset of the NXTCMDAR register differs with both DMA type and
 * version, while the per-channel stride is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */
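/*
 * Illustrative example (added comment, not taken from the hardware manual):
 * for a final, single-shot PIO descriptor that carries three PIO words and
 * waits for the peripheral to finish, mxs_dma_prep_slave_sg() below ends up
 * composing the descriptor "bits" word roughly as
 *
 *	CCW_IRQ | CCW_DEC_SEM | CCW_WAIT4END |
 *	CCW_HALT_ON_TERM | CCW_TERM_FLUSH |
 *	BF_CCW(3, PIO_NUM) | BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND)
 */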
struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define CCW_BLOCK_SIZE	(4 * PAGE_SIZE)
#define NUM_CCW	(int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))

struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	unsigned int			chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
	struct platform_device		*pdev;
	unsigned int			nr_channels;
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}

static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}

static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	mxs_chan->status = DMA_SUCCESS;
}

static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
}

static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When the completion and termination-error bits are set at the
	 * same time, we do not treat it as an error.  IOW, it only becomes
	 * an error we need to handle here if it is either (1) a bus error
	 * or (2) a termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */

	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
	while (stat1) {
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			dma_cookie_complete(&mxs_chan->desc);

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
				CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
				GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 *    [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [2] If there are two DMA commands in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *            ......
 *    [3] If there are more than two DMA commands in the DMA chain, the code
 *        should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
 *            ......
 *
 * (An illustrative client-side sketch of case [3] follows the function below.)
 */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	u32 i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
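/*
 * Illustrative sketch only -- this helper is not part of the original driver
 * and is never called.  It shows how a hypothetical client could chain three
 * DMA commands (one PIO setup plus two data transfers) following the flag
 * rules documented above mxs_dma_prep_slave_sg(), using the generic
 * dmaengine_prep_slave_sg() wrapper.  The pio word array and the mapped
 * scatterlist are assumed to be prepared by the caller; all names here are
 * hypothetical.
 */
static inline struct dma_async_tx_descriptor *mxs_dma_example_chain(
		struct dma_chan *chan, u32 *pio, unsigned int pio_words,
		struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/*
	 * First command: no flags.  For DMA_TRANS_NONE the sgl argument is
	 * reused as a pointer to the PIO words, exactly as handled in
	 * mxs_dma_prep_slave_sg() above.
	 */
	desc = dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio,
				       pio_words, DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	/* Middle command: DMA_PREP_INTERRUPT appends it to the chain. */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return NULL;

	/* Last command: append and request WAIT4END via DMA_CTRL_ACK. */
	return dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}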
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 num_periods = buf_len / period_len;
	u32 i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
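/*
 * Illustrative note (added): a cyclic client -- typically the mxs PCM/audio
 * driver -- maps one ring buffer and then does roughly
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_MEM_TO_DEV, 0);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * and receives one callback per period, since every CCW above has CCW_IRQ
 * set.  (Helper signatures differ slightly between kernel versions; this is
 * a sketch, not a verbatim client.)
 */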
static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_reset_chan(mxs_chan);
		mxs_dma_disable_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);

	return mxs_chan->status;
}

static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_dma_enable_chan(mxs_chan);
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}

struct mxs_dma_filter_param {
	struct device_node *of_node;
	unsigned int chan_id;
};

static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mxs_dma_filter_param *param = fn_param;
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_irq;

	if (mxs_dma->dma_device.dev->of_node != param->of_node)
		return false;

	if (chan->chan_id != param->chan_id)
		return false;

	chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
	if (chan_irq < 0)
		return false;

	mxs_chan->chan_irq = chan_irq;

	return true;
}

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.of_node = ofdma->of_node;
	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
}
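/*
 * Binding sketch (illustrative, assuming the standard fsl-mxs-dma device
 * tree binding with #dma-cells = <1>, where the single cell is the channel
 * number checked in mxs_dma_xlate() above):
 *
 *	ssp0: ssp@80010000 {
 *		...
 *		dmas = <&dma_apbh 0>;
 *		dma-names = "rx-tx";
 *	};
 *
 * A client driver would then obtain the channel with
 * dma_request_slave_channel(dev, "rx-tx").
 */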
static int __init mxs_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct platform_device_id *id_entry;
	const struct of_device_id *of_id;
	const struct mxs_dma_type *dma_type;
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "failed to read dma-channels\n");
		return ret;
	}

	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
	if (of_id)
		id_entry = of_id->data;
	else
		id_entry = platform_get_device_id(pdev);

	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
	mxs_dma->type = dma_type->type;
	mxs_dma->dev_id = dma_type->id;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(mxs_dma->base))
		return PTR_ERR(mxs_dma->base);

	mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk))
		return PTR_ERR(mxs_dma->clk);

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		return ret;

	mxs_dma->pdev = pdev;
	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma gets 65280 (0xff00) bytes as the maximum segment size */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_control = mxs_dma_control;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		return ret;
	}

	ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev,
			"failed to register controller\n");
		dma_async_device_unregister(&mxs_dma->dma_device);
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.id_table	= mxs_dma_ids,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
/* Register early so the DMA engine is available before client drivers probe */
subsys_initcall(mxs_dma_module_init);