/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfers go from @0 up to @0xFFFE items, which can lead to
 * unaligned scatter-gather at the boundary. It is therefore safer to
 * round this value down to the FIFO size (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};
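/*
 * Shadow copy of one stream's registers. The values are precomputed per
 * SG element / period and written to the hardware when the transfer (or
 * its next element) is started.
 */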
struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};
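/*
 * Per-stream driver state: @desc is the descriptor currently being
 * processed, @next_sg indexes the next SG element to program, and
 * @chan_reg holds the register template built from the slave config.
 */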
struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
{
	return kzalloc(sizeof(struct stm32_dma_desc) +
		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}
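/*
 * Check whether a burst of @burst beats of @width bytes can be used with
 * the selected FIFO threshold. For example, with a FULL threshold and
 * 32-bit beats the threshold corresponds to 4 beats, so a burst of 4 is
 * allowed while bursts of 8 or 16 are not.
 */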
static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If the number of beats fits into a whole number
			 * of bursts, this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	switch (threshold) {
	case STM32_DMA_FIFO_THRESHOLD_FULL:
		if (buf_len >= STM32_DMA_MAX_BURST)
			return true;
		else
			return false;
	case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
		if (buf_len >= STM32_DMA_MAX_BURST / 2)
			return true;
		else
			return false;
	default:
		return false;
	}
}

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}
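/*
 * Each stream owns a 6-bit flag field in LISR/HISR (and LIFCR/HIFCR):
 * streams 0..3 use the low registers, streams 4..7 the high ones. For
 * example, stream 5 flags sit in HISR bits 6..11 and stream 6 flags in
 * HISR bits 16..21, as computed by the shifts below.
 */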
static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the
	 * selected DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	u32 dma_scr, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);

		do {
			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
			dma_scr &= STM32_DMA_SCR_EN;
			if (!dma_scr)
				break;

			if (time_after_eq(jiffies, timeout)) {
				dev_err(chan2dev(chan), "%s: timeout!\n",
					__func__);
				return -EBUSY;
			}
			cond_resched();
		} while (1);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->busy) {
		stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
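/*
 * Pop the next pending descriptor if none is in flight, program the stream
 * registers from its next SG element and enable the stream. Called with
 * the channel's vchan lock held.
 */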
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				list_del(&chan->desc->vdesc.node);
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "FIFO Error\n");
		else
			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
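/*
 * Build the SCR/SFCR field values for one chunk of @buf_len bytes in the
 * given direction: derive the peripheral side from the slave config, then
 * pick the widest possible memory bus width and the largest memory and
 * peripheral bursts that still match the FIFO threshold.
 */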
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, threshold;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	threshold = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}
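/*
 * Each scatterlist entry gets its own precomputed register set in
 * sg_req[]; stm32_dma_start_transfer() reprograms the stream with the
 * next entry on every Transfer Complete interrupt.
 */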
static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = stm32_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}
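/*
 * A single period uses the hardware circular mode; more than one period
 * uses double-buffer mode, with stm32_dma_configure_next_sg() rewriting
 * the inactive SM0AR/SM1AR target after each completed period.
 */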
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests may be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA has
	 * started, new requests can only be queued after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = stm32_dma_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
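/*
 * Memory-to-memory copies force FIFO mode, increment both the source and
 * destination addresses, and are split into chunks of at most
 * STM32_DMA_ALIGNED_MAX_DATA_ITEMS items each.
 */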
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = stm32_dma_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}
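/*
 * When memory bursts are in use, the residue computed below is rounded up
 * to a whole burst, keeping the reported value consistent with the
 * DMA_RESIDUE_GRANULARITY_BURST granularity advertised in probe.
 */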
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue = 0;
	int i;

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes from
	 * NDTR
	 */
	if (chan->desc->cyclic && next_sg == 0) {
		residue = stm32_dma_get_remaining_bytes(chan);
		goto end;
	}

	/*
	 * For all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining periods/sg to be
	 * transferred
	 */
	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg_req[i].len;
	residue += stm32_dma_get_remaining_bytes(chan);

end:
	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;
	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		clk_disable_unprepare(dmadev->clk);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	clk_disable_unprepare(dmadev->clk);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
}
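/*
 * The DT specifier carries four cells: channel (stream) id, request line,
 * a mask of SCR configuration bits and a features mask. An illustrative
 * (made-up) client entry could look like:
 *
 *	dmas = <&dma1 5 4 0x400 0x1>;
 *
 * i.e. stream 5, request line 4, MINC set in the stream config, and a
 * half-full FIFO threshold in the features cell.
 */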
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(dmadev->clk);
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		return ret;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_unregister;
		}
		chan->irq = res->start;
		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);

	return ret;
}

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
	},
};
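/*
 * The controller is not hot-pluggable, so the driver is registered with
 * platform_driver_probe() and at subsys_initcall level, making the DMA
 * channels available before most client drivers probe.
 */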
static int __init stm32_dma_init(void)
{
	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
}
subsys_initcall(stm32_dma_init);