/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					| STM32_DMA_SCR_MINC \
					| STM32_DMA_SCR_PINCOS \
					| STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					| STM32_DMA_SCR_TEIE \
					| STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					| STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_MAX_BURST		16

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 threshold;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
{
	return kzalloc(sizeof(struct stm32_dma_desc) +
		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_maxburst, u32 dst_maxburst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if ((!src_maxburst) && (!dst_maxburst)) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags;
}

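/*
 * Illustration (editor's note, not in the original sources): the shift
 * expression above yields the following per-stream bit offsets, matching
 * the ISR/IFCR layout of the STM32 DMA controller:
 *
 *   stream 0 or 4 -> bits  0..5
 *   stream 1 or 5 -> bits  6..11
 *   stream 2 or 6 -> bits 16..21
 *   stream 3 or 7 -> bits 22..27
 *
 * Streams 0..3 live in LISR/LIFCR, streams 4..7 in HISR/HIFCR.
 */
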
static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the
	 * selected DMA channel at the correct bit offset inside that
	 * register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	u32 dma_scr, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);

		do {
			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
			dma_scr &= STM32_DMA_SCR_EN;
			if (!dma_scr)
				break;

			if (time_after_eq(jiffies, timeout)) {
				dev_err(chan2dev(chan), "%s: timeout!\n",
					__func__);
				return -EBUSY;
			}
			cond_resched();
		} while (1);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->busy) {
		stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

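/*
 * Note added for clarity (editor's note, not in the original sources): in
 * cyclic double-buffer mode the hardware alternates between SM0AR and
 * SM1AR on every Transfer Complete event and toggles the CT bit.  The
 * completion handler below therefore only has to reprogram the currently
 * inactive memory address register, via stm32_dma_configure_next_sg(),
 * with the address of the following period.
 */
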
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				list_del(&chan->desc->vdesc.node);
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));

	if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		stm32_dma_handle_chan_done(chan);
	} else {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
		if (chan->desc->cyclic)
			stm32_dma_configure_next_sg(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst;
	u32 dma_scr = 0;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		if (!src_addr_width)
			src_addr_width = dst_addr_width;

		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
		if (src_burst_size < 0)
			return src_burst_size;

		if (!dst_addr_width)
			dst_addr_width = src_addr_width;

		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_maxburst, dst_maxburst);

	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

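/*
 * Note added for clarity (editor's note, not in the original sources): the
 * prep callbacks below do not touch the hardware directly.  Each
 * scatter-gather element (or cyclic period) gets its own snapshot of the
 * stream registers (SCR/SFCR/SPAR/SM0AR/SM1AR/SNDTR) in a
 * struct stm32_dma_sg_req, so stm32_dma_start_transfer() can launch the
 * next chunk by simply writing that snapshot back to the controller.
 */
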
static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = stm32_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
	if (ret < 0)
		goto err;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

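/*
 * Note added for clarity (editor's note, not in the original sources): for
 * cyclic transfers the driver picks between two hardware modes.  If the
 * buffer holds exactly one period it sets CIRC and the stream restarts
 * itself at the same addresses.  If there are several periods it sets DBM
 * (double buffer mode) instead, and the interrupt handler keeps feeding
 * the address of the next period into whichever memory address register
 * is currently not in use.
 */
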
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * We accept additional requests as long as the DMA has not been
	 * started; the driver will loop over all of them.  Once the DMA is
	 * running, new requests can only be queued after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = stm32_dma_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	u32 num_sgs;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS);
	desc = stm32_dma_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_MAX_DATA_ITEMS);

		desc->sg_req[i].len = xfer_count;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS |
			STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) |
			STM32_DMA_SFCR_FEIE;
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}

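/*
 * Illustration (editor's note, not in the original sources): NDTR counts
 * peripheral-side data items, and the PSIZE encoding (0/1/2 for
 * byte/half-word/word) is also the log2 of the item size, so
 * "ndtr << width" converts items to bytes.  For example, NDTR = 10 with a
 * half-word PSIZE gives 10 << 1 = 20 bytes still to be transferred.
 */
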
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 residue = 0;
	int i;

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes from
	 * NDTR
	 */
	if (chan->desc->cyclic && next_sg == 0)
		return stm32_dma_get_remaining_bytes(chan);

	/*
	 * For all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining periods/sg to be
	 * transferred
	 */
	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg_req[i].len;
	residue += stm32_dma_get_remaining_bytes(chan);

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if ((status == DMA_COMPLETE) || (!state))
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;
	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		clk_disable_unprepare(dmadev->clk);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	clk_disable_unprepare(dmadev->clk);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->chan_reg.dma_sfcr = cfg->threshold & STM32_DMA_SFCR_FTH_MASK;
}

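/*
 * Illustration (editor's note, not in the original sources): a client
 * described in the device tree references a stream with four cells, which
 * map onto struct stm32_dma_cfg: <channel-id request-line stream-config
 * threshold>.  A hypothetical client node could therefore contain
 * something like
 *	dmas = <&dma1 5 4 0x10000 0x3>;
 * where 5 is the stream, 4 the request line, 0x10000 a raw SCR value
 * (masked by STM32_DMA_SCR_CFG_MASK) and 0x3 the FIFO threshold.
 */
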
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.threshold = dma_spec->args[3];

	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
	    (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(dmadev->clk);
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		return ret;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_unregister;
		}
		chan->irq = res->start;
		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return ret;
}

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
	},
};

static int __init stm32_dma_init(void)
{
	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
}
subsys_initcall(stm32_dma_init);