/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

struct imxdma_channel {
	struct imxdma_engine *imxdma;
	unsigned int channel;
	unsigned int imxdma_channel;

	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	dma_cookie_t last_completed;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device *dev;
	struct dma_device dma_device;
	struct imxdma_channel channel[MAX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static void imxdma_handle(struct imxdma_channel *imxdmac)
{
	if (imxdmac->desc.callback)
		imxdmac->desc.callback(imxdmac->desc.callback_param);
	imxdmac->last_completed = imxdmac->desc.cookie;
}

static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_SUCCESS;
	imxdma_handle(imxdmac);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_ERROR;
	imxdma_handle(imxdmac);
}

static void imxdma_progression(int channel, void *data,
		struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_SUCCESS;
	imxdma_handle(imxdmac);
}
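
/*
 * Illustrative sketch (not part of the driver): a client would normally
 * program the channel through the generic device_control() hook handled
 * below before preparing any transfer. "per_fifo_addr" and "chan" are
 * hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_FROM_DEVICE,
 *		.src_addr	= per_fifo_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *
 *	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *					   (unsigned long)&cfg);
 */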

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdmac->status = DMA_ERROR;
		imx_dma_disable(imxdmac->imxdma_channel);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
				imxdmac->watermark_level);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	dma_cookie_t last_used;
	enum dma_status ret;

	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);

	return ret;
}

static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
	dma_cookie_t cookie = imxdma->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	imxdma->chan.cookie = cookie;
	imxdma->desc.cookie = cookie;

	return cookie;
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_irq(&imxdmac->lock);

	cookie = imxdma_assign_cookie(imxdmac);

	imx_dma_enable(imxdmac->imxdma_channel);

	spin_unlock_irq(&imxdmac->lock);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	imxdmac->dma_request = data->dma_request;

	dma_async_tx_descriptor_init(&imxdmac->desc, chan);
	imxdmac->desc.tx_submit = imxdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	imxdmac->desc.flags = DMA_CTRL_ACK;

	imxdmac->status = DMA_SUCCESS;

	return 0;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	imx_dma_disable(imxdmac->imxdma_channel);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, ret, dma_length = 0;
	unsigned int dmamode;

	if (imxdmac->status == DMA_IN_PROGRESS)
		return NULL;

	imxdmac->status = DMA_IN_PROGRESS;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	if (direction == DMA_FROM_DEVICE)
		dmamode = DMA_MODE_READ;
	else
		dmamode = DMA_MODE_WRITE;

	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
			dma_length, imxdmac->per_address, dmamode);
	if (ret)
		return NULL;

	return &imxdmac->desc;
}
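
/*
 * Illustrative sketch: the submit path a client follows for the slave-sg
 * transfer prepared above. "sgl", "sg_len", "my_callback" and "my_data"
 * are hypothetical and error handling is trimmed. Note that in this
 * driver tx_submit() already enables the channel, so
 * device_issue_pending() is a no-op.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						  DMA_FROM_DEVICE, 0);
 *	if (!desc)
 *		return -EINVAL;
 *
 *	desc->callback = my_callback;
 *	desc->callback_param = my_data;
 *	cookie = desc->tx_submit(desc);
 */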

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int i, ret;
	unsigned int periods = buf_len / period_len;
	unsigned int dmamode;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (imxdmac->status == DMA_IN_PROGRESS)
		return NULL;
	imxdmac->status = DMA_IN_PROGRESS;

	ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
			imxdma_progression);
	if (ret) {
		dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
		return NULL;
	}

	/* kfree(NULL) is safe, so no need to check first */
	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/* close the loop: chain the terminating entry back to the first */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	if (direction == DMA_FROM_DEVICE)
		dmamode = DMA_MODE_READ;
	else
		dmamode = DMA_MODE_WRITE;

	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list,
			periods, IMX_DMA_LENGTH_LOOP, imxdmac->per_address,
			dmamode);
	if (ret)
		return NULL;

	return &imxdmac->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	/*
	 * Nothing to do. We only have a single descriptor.
	 */
}
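
/*
 * Illustrative sketch: cyclic setup as an audio-style client might do it.
 * "buf", "buf_len", "period_len" and "period_elapsed" are hypothetical;
 * buf_len should be an exact multiple of period_len, since the number of
 * periods is computed by integer division above. The progression handler
 * then runs the descriptor callback once per completed period.
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
 *						    period_len,
 *						    DMA_TO_DEVICE);
 *	if (!desc)
 *		return -EINVAL;
 *
 *	desc->callback = period_elapsed;
 *	desc->callback_param = my_data;
 *	desc->tx_submit(desc);
 */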

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
				DMA_PRIO_MEDIUM);
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
				imxdma_irq_handler, imxdma_err_handler,
				imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
		dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);

		imxdmac->chan.device = &imxdma->dma_device;
		imxdmac->chan.chan_id = i;
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
				&imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver = {
		.name = "imx-dma",
	},
	.remove = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");