// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Requesting suspension of an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */

enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static __maybe_unused u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since 9540 and 8540 have the same HW revision:
 * use v4a for 9540 or earlier,
 * use v4b for 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static __maybe_unused u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};


static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
	{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
	{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
	{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number.
 * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @slave_config: DMA slave configuration.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	struct dma_slave_config slave_config;
	bool configured;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @regs_interrupt: Scratch space for registers during interrupt.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
563 * @gen_dmac: the struct for generic registers values to represent u8500/8540 564 * DMA controller 565 */ 566 struct d40_base { 567 spinlock_t interrupt_lock; 568 spinlock_t execmd_lock; 569 struct device *dev; 570 void __iomem *virtbase; 571 u8 rev:4; 572 struct clk *clk; 573 phys_addr_t phy_start; 574 resource_size_t phy_size; 575 int irq; 576 int num_memcpy_chans; 577 int num_phy_chans; 578 int num_log_chans; 579 struct dma_device dma_both; 580 struct dma_device dma_slave; 581 struct dma_device dma_memcpy; 582 struct d40_chan *phy_chans; 583 struct d40_chan *log_chans; 584 struct d40_chan **lookup_log_chans; 585 struct d40_chan **lookup_phy_chans; 586 struct stedma40_platform_data *plat_data; 587 struct regulator *lcpa_regulator; 588 /* Physical half channels */ 589 struct d40_phy_res *phy_res; 590 struct d40_lcla_pool lcla_pool; 591 void *lcpa_base; 592 dma_addr_t phy_lcpa; 593 resource_size_t lcpa_size; 594 struct kmem_cache *desc_slab; 595 u32 reg_val_backup[BACKUP_REGS_SZ]; 596 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; 597 u32 *reg_val_backup_chan; 598 u32 *regs_interrupt; 599 u16 gcc_pwr_off_mask; 600 struct d40_gen_dmac gen_dmac; 601 }; 602 603 static struct device *chan2dev(struct d40_chan *d40c) 604 { 605 return &d40c->chan.dev->device; 606 } 607 608 static bool chan_is_physical(struct d40_chan *chan) 609 { 610 return chan->log_num == D40_PHY_CHAN; 611 } 612 613 static bool chan_is_logical(struct d40_chan *chan) 614 { 615 return !chan_is_physical(chan); 616 } 617 618 static void __iomem *chan_base(struct d40_chan *chan) 619 { 620 return chan->base->virtbase + D40_DREG_PCBASE + 621 chan->phy_chan->num * D40_DREG_PCDELTA; 622 } 623 624 #define d40_err(dev, format, arg...) \ 625 dev_err(dev, "[%s] " format, __func__, ## arg) 626 627 #define chan_err(d40c, format, arg...) 
\ 628 d40_err(chan2dev(d40c), format, ## arg) 629 630 static int d40_set_runtime_config_write(struct dma_chan *chan, 631 struct dma_slave_config *config, 632 enum dma_transfer_direction direction); 633 634 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, 635 int lli_len) 636 { 637 bool is_log = chan_is_logical(d40c); 638 u32 align; 639 void *base; 640 641 if (is_log) 642 align = sizeof(struct d40_log_lli); 643 else 644 align = sizeof(struct d40_phy_lli); 645 646 if (lli_len == 1) { 647 base = d40d->lli_pool.pre_alloc_lli; 648 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 649 d40d->lli_pool.base = NULL; 650 } else { 651 d40d->lli_pool.size = lli_len * 2 * align; 652 653 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 654 d40d->lli_pool.base = base; 655 656 if (d40d->lli_pool.base == NULL) 657 return -ENOMEM; 658 } 659 660 if (is_log) { 661 d40d->lli_log.src = PTR_ALIGN(base, align); 662 d40d->lli_log.dst = d40d->lli_log.src + lli_len; 663 664 d40d->lli_pool.dma_addr = 0; 665 } else { 666 d40d->lli_phy.src = PTR_ALIGN(base, align); 667 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; 668 669 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, 670 d40d->lli_phy.src, 671 d40d->lli_pool.size, 672 DMA_TO_DEVICE); 673 674 if (dma_mapping_error(d40c->base->dev, 675 d40d->lli_pool.dma_addr)) { 676 kfree(d40d->lli_pool.base); 677 d40d->lli_pool.base = NULL; 678 d40d->lli_pool.dma_addr = 0; 679 return -ENOMEM; 680 } 681 } 682 683 return 0; 684 } 685 686 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) 687 { 688 if (d40d->lli_pool.dma_addr) 689 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, 690 d40d->lli_pool.size, DMA_TO_DEVICE); 691 692 kfree(d40d->lli_pool.base); 693 d40d->lli_pool.base = NULL; 694 d40d->lli_pool.size = 0; 695 d40d->lli_log.src = NULL; 696 d40d->lli_log.dst = NULL; 697 d40d->lli_phy.src = NULL; 698 d40d->lli_phy.dst = NULL; 699 } 700 701 static int d40_lcla_alloc_one(struct d40_chan *d40c, 702 struct d40_desc *d40d) 703 { 704 unsigned long flags; 705 int i; 706 int ret = -EINVAL; 707 708 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 709 710 /* 711 * Allocate both src and dst at the same time, therefore the half 712 * start on 1 since 0 can't be used since zero is used as end marker. 
713 */ 714 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 715 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; 716 717 if (!d40c->base->lcla_pool.alloc_map[idx]) { 718 d40c->base->lcla_pool.alloc_map[idx] = d40d; 719 d40d->lcla_alloc++; 720 ret = i; 721 break; 722 } 723 } 724 725 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 726 727 return ret; 728 } 729 730 static int d40_lcla_free_all(struct d40_chan *d40c, 731 struct d40_desc *d40d) 732 { 733 unsigned long flags; 734 int i; 735 int ret = -EINVAL; 736 737 if (chan_is_physical(d40c)) 738 return 0; 739 740 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 741 742 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 743 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; 744 745 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { 746 d40c->base->lcla_pool.alloc_map[idx] = NULL; 747 d40d->lcla_alloc--; 748 if (d40d->lcla_alloc == 0) { 749 ret = 0; 750 break; 751 } 752 } 753 } 754 755 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 756 757 return ret; 758 759 } 760 761 static void d40_desc_remove(struct d40_desc *d40d) 762 { 763 list_del(&d40d->node); 764 } 765 766 static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 767 { 768 struct d40_desc *desc = NULL; 769 770 if (!list_empty(&d40c->client)) { 771 struct d40_desc *d; 772 struct d40_desc *_d; 773 774 list_for_each_entry_safe(d, _d, &d40c->client, node) { 775 if (async_tx_test_ack(&d->txd)) { 776 d40_desc_remove(d); 777 desc = d; 778 memset(desc, 0, sizeof(*desc)); 779 break; 780 } 781 } 782 } 783 784 if (!desc) 785 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); 786 787 if (desc) 788 INIT_LIST_HEAD(&desc->node); 789 790 return desc; 791 } 792 793 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 794 { 795 796 d40_pool_lli_free(d40c, d40d); 797 d40_lcla_free_all(d40c, d40d); 798 kmem_cache_free(d40c->base->desc_slab, d40d); 799 } 800 801 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 802 { 803 list_add_tail(&desc->node, &d40c->active); 804 } 805 806 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) 807 { 808 struct d40_phy_lli *lli_dst = desc->lli_phy.dst; 809 struct d40_phy_lli *lli_src = desc->lli_phy.src; 810 void __iomem *base = chan_base(chan); 811 812 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); 813 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); 814 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); 815 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); 816 817 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); 818 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); 819 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); 820 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); 821 } 822 823 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc) 824 { 825 list_add_tail(&desc->node, &d40c->done); 826 } 827 828 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) 829 { 830 struct d40_lcla_pool *pool = &chan->base->lcla_pool; 831 struct d40_log_lli_bidir *lli = &desc->lli_log; 832 int lli_current = desc->lli_current; 833 int lli_len = desc->lli_len; 834 bool cyclic = desc->cyclic; 835 int curr_lcla = -EINVAL; 836 int first_lcla = 0; 837 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; 838 bool linkback; 839 840 /* 841 * We may have partially running cyclic transfers, in case we did't get 842 * enough LCLA entries. 
843 */ 844 linkback = cyclic && lli_current == 0; 845 846 /* 847 * For linkback, we need one LCLA even with only one link, because we 848 * can't link back to the one in LCPA space 849 */ 850 if (linkback || (lli_len - lli_current > 1)) { 851 /* 852 * If the channel is expected to use only soft_lli don't 853 * allocate a lcla. This is to avoid a HW issue that exists 854 * in some controller during a peripheral to memory transfer 855 * that uses linked lists. 856 */ 857 if (!(chan->phy_chan->use_soft_lli && 858 chan->dma_cfg.dir == DMA_DEV_TO_MEM)) 859 curr_lcla = d40_lcla_alloc_one(chan, desc); 860 861 first_lcla = curr_lcla; 862 } 863 864 /* 865 * For linkback, we normally load the LCPA in the loop since we need to 866 * link it to the second LCLA and not the first. However, if we 867 * couldn't even get a first LCLA, then we have to run in LCPA and 868 * reload manually. 869 */ 870 if (!linkback || curr_lcla == -EINVAL) { 871 unsigned int flags = 0; 872 873 if (curr_lcla == -EINVAL) 874 flags |= LLI_TERM_INT; 875 876 d40_log_lli_lcpa_write(chan->lcpa, 877 &lli->dst[lli_current], 878 &lli->src[lli_current], 879 curr_lcla, 880 flags); 881 lli_current++; 882 } 883 884 if (curr_lcla < 0) 885 goto set_current; 886 887 for (; lli_current < lli_len; lli_current++) { 888 unsigned int lcla_offset = chan->phy_chan->num * 1024 + 889 8 * curr_lcla * 2; 890 struct d40_log_lli *lcla = pool->base + lcla_offset; 891 unsigned int flags = 0; 892 int next_lcla; 893 894 if (lli_current + 1 < lli_len) 895 next_lcla = d40_lcla_alloc_one(chan, desc); 896 else 897 next_lcla = linkback ? first_lcla : -EINVAL; 898 899 if (cyclic || next_lcla == -EINVAL) 900 flags |= LLI_TERM_INT; 901 902 if (linkback && curr_lcla == first_lcla) { 903 /* First link goes in both LCPA and LCLA */ 904 d40_log_lli_lcpa_write(chan->lcpa, 905 &lli->dst[lli_current], 906 &lli->src[lli_current], 907 next_lcla, flags); 908 } 909 910 /* 911 * One unused LCLA in the cyclic case if the very first 912 * next_lcla fails... 
913 */ 914 d40_log_lli_lcla_write(lcla, 915 &lli->dst[lli_current], 916 &lli->src[lli_current], 917 next_lcla, flags); 918 919 /* 920 * Cache maintenance is not needed if lcla is 921 * mapped in esram 922 */ 923 if (!use_esram_lcla) { 924 dma_sync_single_range_for_device(chan->base->dev, 925 pool->dma_addr, lcla_offset, 926 2 * sizeof(struct d40_log_lli), 927 DMA_TO_DEVICE); 928 } 929 curr_lcla = next_lcla; 930 931 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { 932 lli_current++; 933 break; 934 } 935 } 936 set_current: 937 desc->lli_current = lli_current; 938 } 939 940 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 941 { 942 if (chan_is_physical(d40c)) { 943 d40_phy_lli_load(d40c, d40d); 944 d40d->lli_current = d40d->lli_len; 945 } else 946 d40_log_lli_to_lcxa(d40c, d40d); 947 } 948 949 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 950 { 951 return list_first_entry_or_null(&d40c->active, struct d40_desc, node); 952 } 953 954 /* remove desc from current queue and add it to the pending_queue */ 955 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 956 { 957 d40_desc_remove(desc); 958 desc->is_in_client_list = false; 959 list_add_tail(&desc->node, &d40c->pending_queue); 960 } 961 962 static struct d40_desc *d40_first_pending(struct d40_chan *d40c) 963 { 964 return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc, 965 node); 966 } 967 968 static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 969 { 970 return list_first_entry_or_null(&d40c->queue, struct d40_desc, node); 971 } 972 973 static struct d40_desc *d40_first_done(struct d40_chan *d40c) 974 { 975 return list_first_entry_or_null(&d40c->done, struct d40_desc, node); 976 } 977 978 static int d40_psize_2_burst_size(bool is_log, int psize) 979 { 980 if (is_log) { 981 if (psize == STEDMA40_PSIZE_LOG_1) 982 return 1; 983 } else { 984 if (psize == STEDMA40_PSIZE_PHY_1) 985 return 1; 986 } 987 988 return 2 << psize; 989 } 990 991 /* 992 * The dma only supports transmitting packages up to 993 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes. 994 * 995 * Calculate the total number of dma elements required to send the entire sg list. 
996 */ 997 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) 998 { 999 int dmalen; 1000 u32 max_w = max(data_width1, data_width2); 1001 u32 min_w = min(data_width1, data_width2); 1002 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); 1003 1004 if (seg_max > STEDMA40_MAX_SEG_SIZE) 1005 seg_max -= max_w; 1006 1007 if (!IS_ALIGNED(size, max_w)) 1008 return -EINVAL; 1009 1010 if (size <= seg_max) 1011 dmalen = 1; 1012 else { 1013 dmalen = size / seg_max; 1014 if (dmalen * seg_max < size) 1015 dmalen++; 1016 } 1017 return dmalen; 1018 } 1019 1020 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, 1021 u32 data_width1, u32 data_width2) 1022 { 1023 struct scatterlist *sg; 1024 int i; 1025 int len = 0; 1026 int ret; 1027 1028 for_each_sg(sgl, sg, sg_len, i) { 1029 ret = d40_size_2_dmalen(sg_dma_len(sg), 1030 data_width1, data_width2); 1031 if (ret < 0) 1032 return ret; 1033 len += ret; 1034 } 1035 return len; 1036 } 1037 1038 static int __d40_execute_command_phy(struct d40_chan *d40c, 1039 enum d40_command command) 1040 { 1041 u32 status; 1042 int i; 1043 void __iomem *active_reg; 1044 int ret = 0; 1045 unsigned long flags; 1046 u32 wmask; 1047 1048 if (command == D40_DMA_STOP) { 1049 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); 1050 if (ret) 1051 return ret; 1052 } 1053 1054 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 1055 1056 if (d40c->phy_chan->num % 2 == 0) 1057 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1058 else 1059 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1060 1061 if (command == D40_DMA_SUSPEND_REQ) { 1062 status = (readl(active_reg) & 1063 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1064 D40_CHAN_POS(d40c->phy_chan->num); 1065 1066 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 1067 goto unlock; 1068 } 1069 1070 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); 1071 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), 1072 active_reg); 1073 1074 if (command == D40_DMA_SUSPEND_REQ) { 1075 1076 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { 1077 status = (readl(active_reg) & 1078 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1079 D40_CHAN_POS(d40c->phy_chan->num); 1080 1081 cpu_relax(); 1082 /* 1083 * Reduce the number of bus accesses while 1084 * waiting for the DMA to suspend. 
1085 */ 1086 udelay(3); 1087 1088 if (status == D40_DMA_STOP || 1089 status == D40_DMA_SUSPENDED) 1090 break; 1091 } 1092 1093 if (i == D40_SUSPEND_MAX_IT) { 1094 chan_err(d40c, 1095 "unable to suspend the chl %d (log: %d) status %x\n", 1096 d40c->phy_chan->num, d40c->log_num, 1097 status); 1098 dump_stack(); 1099 ret = -EBUSY; 1100 } 1101 1102 } 1103 unlock: 1104 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); 1105 return ret; 1106 } 1107 1108 static void d40_term_all(struct d40_chan *d40c) 1109 { 1110 struct d40_desc *d40d; 1111 struct d40_desc *_d; 1112 1113 /* Release completed descriptors */ 1114 while ((d40d = d40_first_done(d40c))) { 1115 d40_desc_remove(d40d); 1116 d40_desc_free(d40c, d40d); 1117 } 1118 1119 /* Release active descriptors */ 1120 while ((d40d = d40_first_active_get(d40c))) { 1121 d40_desc_remove(d40d); 1122 d40_desc_free(d40c, d40d); 1123 } 1124 1125 /* Release queued descriptors waiting for transfer */ 1126 while ((d40d = d40_first_queued(d40c))) { 1127 d40_desc_remove(d40d); 1128 d40_desc_free(d40c, d40d); 1129 } 1130 1131 /* Release pending descriptors */ 1132 while ((d40d = d40_first_pending(d40c))) { 1133 d40_desc_remove(d40d); 1134 d40_desc_free(d40c, d40d); 1135 } 1136 1137 /* Release client owned descriptors */ 1138 if (!list_empty(&d40c->client)) 1139 list_for_each_entry_safe(d40d, _d, &d40c->client, node) { 1140 d40_desc_remove(d40d); 1141 d40_desc_free(d40c, d40d); 1142 } 1143 1144 /* Release descriptors in prepare queue */ 1145 if (!list_empty(&d40c->prepare_queue)) 1146 list_for_each_entry_safe(d40d, _d, 1147 &d40c->prepare_queue, node) { 1148 d40_desc_remove(d40d); 1149 d40_desc_free(d40c, d40d); 1150 } 1151 1152 d40c->pending_tx = 0; 1153 } 1154 1155 static void __d40_config_set_event(struct d40_chan *d40c, 1156 enum d40_events event_type, u32 event, 1157 int reg) 1158 { 1159 void __iomem *addr = chan_base(d40c) + reg; 1160 int tries; 1161 u32 status; 1162 1163 switch (event_type) { 1164 1165 case D40_DEACTIVATE_EVENTLINE: 1166 1167 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1168 | ~D40_EVENTLINE_MASK(event), addr); 1169 break; 1170 1171 case D40_SUSPEND_REQ_EVENTLINE: 1172 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> 1173 D40_EVENTLINE_POS(event); 1174 1175 if (status == D40_DEACTIVATE_EVENTLINE || 1176 status == D40_SUSPEND_REQ_EVENTLINE) 1177 break; 1178 1179 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) 1180 | ~D40_EVENTLINE_MASK(event), addr); 1181 1182 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { 1183 1184 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> 1185 D40_EVENTLINE_POS(event); 1186 1187 cpu_relax(); 1188 /* 1189 * Reduce the number of bus accesses while 1190 * waiting for the DMA to suspend. 1191 */ 1192 udelay(3); 1193 1194 if (status == D40_DEACTIVATE_EVENTLINE) 1195 break; 1196 } 1197 1198 if (tries == D40_SUSPEND_MAX_IT) { 1199 chan_err(d40c, 1200 "unable to stop the event_line chl %d (log: %d)" 1201 "status %x\n", d40c->phy_chan->num, 1202 d40c->log_num, status); 1203 } 1204 break; 1205 1206 case D40_ACTIVATE_EVENTLINE: 1207 /* 1208 * The hardware sometimes doesn't register the enable when src and dst 1209 * event lines are active on the same logical channel. Retry to ensure 1210 * it does. Usually only one retry is sufficient. 
1211 */ 1212 tries = 100; 1213 while (--tries) { 1214 writel((D40_ACTIVATE_EVENTLINE << 1215 D40_EVENTLINE_POS(event)) | 1216 ~D40_EVENTLINE_MASK(event), addr); 1217 1218 if (readl(addr) & D40_EVENTLINE_MASK(event)) 1219 break; 1220 } 1221 1222 if (tries != 99) 1223 dev_dbg(chan2dev(d40c), 1224 "[%s] workaround enable S%cLNK (%d tries)\n", 1225 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', 1226 100 - tries); 1227 1228 WARN_ON(!tries); 1229 break; 1230 1231 case D40_ROUND_EVENTLINE: 1232 BUG(); 1233 break; 1234 1235 } 1236 } 1237 1238 static void d40_config_set_event(struct d40_chan *d40c, 1239 enum d40_events event_type) 1240 { 1241 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 1242 1243 /* Enable event line connected to device (or memcpy) */ 1244 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 1245 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 1246 __d40_config_set_event(d40c, event_type, event, 1247 D40_CHAN_REG_SSLNK); 1248 1249 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) 1250 __d40_config_set_event(d40c, event_type, event, 1251 D40_CHAN_REG_SDLNK); 1252 } 1253 1254 static u32 d40_chan_has_events(struct d40_chan *d40c) 1255 { 1256 void __iomem *chanbase = chan_base(d40c); 1257 u32 val; 1258 1259 val = readl(chanbase + D40_CHAN_REG_SSLNK); 1260 val |= readl(chanbase + D40_CHAN_REG_SDLNK); 1261 1262 return val; 1263 } 1264 1265 static int 1266 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) 1267 { 1268 unsigned long flags; 1269 int ret = 0; 1270 u32 active_status; 1271 void __iomem *active_reg; 1272 1273 if (d40c->phy_chan->num % 2 == 0) 1274 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1275 else 1276 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1277 1278 1279 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 1280 1281 switch (command) { 1282 case D40_DMA_STOP: 1283 case D40_DMA_SUSPEND_REQ: 1284 1285 active_status = (readl(active_reg) & 1286 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1287 D40_CHAN_POS(d40c->phy_chan->num); 1288 1289 if (active_status == D40_DMA_RUN) 1290 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); 1291 else 1292 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); 1293 1294 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) 1295 ret = __d40_execute_command_phy(d40c, command); 1296 1297 break; 1298 1299 case D40_DMA_RUN: 1300 1301 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); 1302 ret = __d40_execute_command_phy(d40c, command); 1303 break; 1304 1305 case D40_DMA_SUSPENDED: 1306 BUG(); 1307 break; 1308 } 1309 1310 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 1311 return ret; 1312 } 1313 1314 static int d40_channel_execute_command(struct d40_chan *d40c, 1315 enum d40_command command) 1316 { 1317 if (chan_is_logical(d40c)) 1318 return __d40_execute_command_log(d40c, command); 1319 else 1320 return __d40_execute_command_phy(d40c, command); 1321 } 1322 1323 static u32 d40_get_prmo(struct d40_chan *d40c) 1324 { 1325 static const unsigned int phy_map[] = { 1326 [STEDMA40_PCHAN_BASIC_MODE] 1327 = D40_DREG_PRMO_PCHAN_BASIC, 1328 [STEDMA40_PCHAN_MODULO_MODE] 1329 = D40_DREG_PRMO_PCHAN_MODULO, 1330 [STEDMA40_PCHAN_DOUBLE_DST_MODE] 1331 = D40_DREG_PRMO_PCHAN_DOUBLE_DST, 1332 }; 1333 static const unsigned int log_map[] = { 1334 [STEDMA40_LCHAN_SRC_PHY_DST_LOG] 1335 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, 1336 [STEDMA40_LCHAN_SRC_LOG_DST_PHY] 1337 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, 1338 [STEDMA40_LCHAN_SRC_LOG_DST_LOG] 1339 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 1340 }; 1341 1342 if 
(chan_is_physical(d40c)) 1343 return phy_map[d40c->dma_cfg.mode_opt]; 1344 else 1345 return log_map[d40c->dma_cfg.mode_opt]; 1346 } 1347 1348 static void d40_config_write(struct d40_chan *d40c) 1349 { 1350 u32 addr_base; 1351 u32 var; 1352 1353 /* Odd addresses are even addresses + 4 */ 1354 addr_base = (d40c->phy_chan->num % 2) * 4; 1355 /* Setup channel mode to logical or physical */ 1356 var = ((u32)(chan_is_logical(d40c)) + 1) << 1357 D40_CHAN_POS(d40c->phy_chan->num); 1358 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 1359 1360 /* Setup operational mode option register */ 1361 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); 1362 1363 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 1364 1365 if (chan_is_logical(d40c)) { 1366 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) 1367 & D40_SREG_ELEM_LOG_LIDX_MASK; 1368 void __iomem *chanbase = chan_base(d40c); 1369 1370 /* Set default config for CFG reg */ 1371 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); 1372 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); 1373 1374 /* Set LIDX for lcla */ 1375 writel(lidx, chanbase + D40_CHAN_REG_SSELT); 1376 writel(lidx, chanbase + D40_CHAN_REG_SDELT); 1377 1378 /* Clear LNK which will be used by d40_chan_has_events() */ 1379 writel(0, chanbase + D40_CHAN_REG_SSLNK); 1380 writel(0, chanbase + D40_CHAN_REG_SDLNK); 1381 } 1382 } 1383 1384 static u32 d40_residue(struct d40_chan *d40c) 1385 { 1386 u32 num_elt; 1387 1388 if (chan_is_logical(d40c)) 1389 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1390 >> D40_MEM_LCSP2_ECNT_POS; 1391 else { 1392 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); 1393 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) 1394 >> D40_SREG_ELEM_PHY_ECNT_POS; 1395 } 1396 1397 return num_elt * d40c->dma_cfg.dst_info.data_width; 1398 } 1399 1400 static bool d40_tx_is_linked(struct d40_chan *d40c) 1401 { 1402 bool is_link; 1403 1404 if (chan_is_logical(d40c)) 1405 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1406 else 1407 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) 1408 & D40_SREG_LNK_PHYS_LNK_MASK; 1409 1410 return is_link; 1411 } 1412 1413 static int d40_pause(struct dma_chan *chan) 1414 { 1415 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 1416 int res = 0; 1417 unsigned long flags; 1418 1419 if (d40c->phy_chan == NULL) { 1420 chan_err(d40c, "Channel is not allocated!\n"); 1421 return -EINVAL; 1422 } 1423 1424 if (!d40c->busy) 1425 return 0; 1426 1427 spin_lock_irqsave(&d40c->lock, flags); 1428 pm_runtime_get_sync(d40c->base->dev); 1429 1430 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1431 1432 pm_runtime_mark_last_busy(d40c->base->dev); 1433 pm_runtime_put_autosuspend(d40c->base->dev); 1434 spin_unlock_irqrestore(&d40c->lock, flags); 1435 return res; 1436 } 1437 1438 static int d40_resume(struct dma_chan *chan) 1439 { 1440 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 1441 int res = 0; 1442 unsigned long flags; 1443 1444 if (d40c->phy_chan == NULL) { 1445 chan_err(d40c, "Channel is not allocated!\n"); 1446 return -EINVAL; 1447 } 1448 1449 if (!d40c->busy) 1450 return 0; 1451 1452 spin_lock_irqsave(&d40c->lock, flags); 1453 pm_runtime_get_sync(d40c->base->dev); 1454 1455 /* If bytes left to transfer or linked tx resume job */ 1456 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) 1457 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1458 1459 pm_runtime_mark_last_busy(d40c->base->dev); 1460 
pm_runtime_put_autosuspend(d40c->base->dev); 1461 spin_unlock_irqrestore(&d40c->lock, flags); 1462 return res; 1463 } 1464 1465 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 1466 { 1467 struct d40_chan *d40c = container_of(tx->chan, 1468 struct d40_chan, 1469 chan); 1470 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1471 unsigned long flags; 1472 dma_cookie_t cookie; 1473 1474 spin_lock_irqsave(&d40c->lock, flags); 1475 cookie = dma_cookie_assign(tx); 1476 d40_desc_queue(d40c, d40d); 1477 spin_unlock_irqrestore(&d40c->lock, flags); 1478 1479 return cookie; 1480 } 1481 1482 static int d40_start(struct d40_chan *d40c) 1483 { 1484 return d40_channel_execute_command(d40c, D40_DMA_RUN); 1485 } 1486 1487 static struct d40_desc *d40_queue_start(struct d40_chan *d40c) 1488 { 1489 struct d40_desc *d40d; 1490 int err; 1491 1492 /* Start queued jobs, if any */ 1493 d40d = d40_first_queued(d40c); 1494 1495 if (d40d != NULL) { 1496 if (!d40c->busy) { 1497 d40c->busy = true; 1498 pm_runtime_get_sync(d40c->base->dev); 1499 } 1500 1501 /* Remove from queue */ 1502 d40_desc_remove(d40d); 1503 1504 /* Add to active queue */ 1505 d40_desc_submit(d40c, d40d); 1506 1507 /* Initiate DMA job */ 1508 d40_desc_load(d40c, d40d); 1509 1510 /* Start dma job */ 1511 err = d40_start(d40c); 1512 1513 if (err) 1514 return NULL; 1515 } 1516 1517 return d40d; 1518 } 1519 1520 /* called from interrupt context */ 1521 static void dma_tc_handle(struct d40_chan *d40c) 1522 { 1523 struct d40_desc *d40d; 1524 1525 /* Get first active entry from list */ 1526 d40d = d40_first_active_get(d40c); 1527 1528 if (d40d == NULL) 1529 return; 1530 1531 if (d40d->cyclic) { 1532 /* 1533 * If this was a paritially loaded list, we need to reloaded 1534 * it, and only when the list is completed. We need to check 1535 * for done because the interrupt will hit for every link, and 1536 * not just the last one. 1537 */ 1538 if (d40d->lli_current < d40d->lli_len 1539 && !d40_tx_is_linked(d40c) 1540 && !d40_residue(d40c)) { 1541 d40_lcla_free_all(d40c, d40d); 1542 d40_desc_load(d40c, d40d); 1543 (void) d40_start(d40c); 1544 1545 if (d40d->lli_current == d40d->lli_len) 1546 d40d->lli_current = 0; 1547 } 1548 } else { 1549 d40_lcla_free_all(d40c, d40d); 1550 1551 if (d40d->lli_current < d40d->lli_len) { 1552 d40_desc_load(d40c, d40d); 1553 /* Start dma job */ 1554 (void) d40_start(d40c); 1555 return; 1556 } 1557 1558 if (d40_queue_start(d40c) == NULL) { 1559 d40c->busy = false; 1560 1561 pm_runtime_mark_last_busy(d40c->base->dev); 1562 pm_runtime_put_autosuspend(d40c->base->dev); 1563 } 1564 1565 d40_desc_remove(d40d); 1566 d40_desc_done(d40c, d40d); 1567 } 1568 1569 d40c->pending_tx++; 1570 tasklet_schedule(&d40c->tasklet); 1571 1572 } 1573 1574 static void dma_tasklet(struct tasklet_struct *t) 1575 { 1576 struct d40_chan *d40c = from_tasklet(d40c, t, tasklet); 1577 struct d40_desc *d40d; 1578 unsigned long flags; 1579 bool callback_active; 1580 struct dmaengine_desc_callback cb; 1581 1582 spin_lock_irqsave(&d40c->lock, flags); 1583 1584 /* Get first entry from the done list */ 1585 d40d = d40_first_done(d40c); 1586 if (d40d == NULL) { 1587 /* Check if we have reached here for cyclic job */ 1588 d40d = d40_first_active_get(d40c); 1589 if (d40d == NULL || !d40d->cyclic) 1590 goto check_pending_tx; 1591 } 1592 1593 if (!d40d->cyclic) 1594 dma_cookie_complete(&d40d->txd); 1595 1596 /* 1597 * If terminating a channel pending_tx is set to zero. 1598 * This prevents any finished active jobs to return to the client. 
1599 */ 1600 if (d40c->pending_tx == 0) { 1601 spin_unlock_irqrestore(&d40c->lock, flags); 1602 return; 1603 } 1604 1605 /* Callback to client */ 1606 callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); 1607 dmaengine_desc_get_callback(&d40d->txd, &cb); 1608 1609 if (!d40d->cyclic) { 1610 if (async_tx_test_ack(&d40d->txd)) { 1611 d40_desc_remove(d40d); 1612 d40_desc_free(d40c, d40d); 1613 } else if (!d40d->is_in_client_list) { 1614 d40_desc_remove(d40d); 1615 d40_lcla_free_all(d40c, d40d); 1616 list_add_tail(&d40d->node, &d40c->client); 1617 d40d->is_in_client_list = true; 1618 } 1619 } 1620 1621 d40c->pending_tx--; 1622 1623 if (d40c->pending_tx) 1624 tasklet_schedule(&d40c->tasklet); 1625 1626 spin_unlock_irqrestore(&d40c->lock, flags); 1627 1628 if (callback_active) 1629 dmaengine_desc_callback_invoke(&cb, NULL); 1630 1631 return; 1632 check_pending_tx: 1633 /* Rescue manouver if receiving double interrupts */ 1634 if (d40c->pending_tx > 0) 1635 d40c->pending_tx--; 1636 spin_unlock_irqrestore(&d40c->lock, flags); 1637 } 1638 1639 static irqreturn_t d40_handle_interrupt(int irq, void *data) 1640 { 1641 int i; 1642 u32 idx; 1643 u32 row; 1644 long chan = -1; 1645 struct d40_chan *d40c; 1646 struct d40_base *base = data; 1647 u32 *regs = base->regs_interrupt; 1648 struct d40_interrupt_lookup *il = base->gen_dmac.il; 1649 u32 il_size = base->gen_dmac.il_size; 1650 1651 spin_lock(&base->interrupt_lock); 1652 1653 /* Read interrupt status of both logical and physical channels */ 1654 for (i = 0; i < il_size; i++) 1655 regs[i] = readl(base->virtbase + il[i].src); 1656 1657 for (;;) { 1658 1659 chan = find_next_bit((unsigned long *)regs, 1660 BITS_PER_LONG * il_size, chan + 1); 1661 1662 /* No more set bits found? */ 1663 if (chan == BITS_PER_LONG * il_size) 1664 break; 1665 1666 row = chan / BITS_PER_LONG; 1667 idx = chan & (BITS_PER_LONG - 1); 1668 1669 if (il[row].offset == D40_PHY_CHAN) 1670 d40c = base->lookup_phy_chans[idx]; 1671 else 1672 d40c = base->lookup_log_chans[il[row].offset + idx]; 1673 1674 if (!d40c) { 1675 /* 1676 * No error because this can happen if something else 1677 * in the system is using the channel. 1678 */ 1679 continue; 1680 } 1681 1682 /* ACK interrupt */ 1683 writel(BIT(idx), base->virtbase + il[row].clr); 1684 1685 spin_lock(&d40c->lock); 1686 1687 if (!il[row].is_error) 1688 dma_tc_handle(d40c); 1689 else 1690 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", 1691 chan, il[row].offset, idx); 1692 1693 spin_unlock(&d40c->lock); 1694 } 1695 1696 spin_unlock(&base->interrupt_lock); 1697 1698 return IRQ_HANDLED; 1699 } 1700 1701 static int d40_validate_conf(struct d40_chan *d40c, 1702 struct stedma40_chan_cfg *conf) 1703 { 1704 int res = 0; 1705 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1706 1707 if (!conf->dir) { 1708 chan_err(d40c, "Invalid direction.\n"); 1709 res = -EINVAL; 1710 } 1711 1712 if ((is_log && conf->dev_type > d40c->base->num_log_chans) || 1713 (!is_log && conf->dev_type > d40c->base->num_phy_chans) || 1714 (conf->dev_type < 0)) { 1715 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); 1716 res = -EINVAL; 1717 } 1718 1719 if (conf->dir == DMA_DEV_TO_DEV) { 1720 /* 1721 * DMAC HW supports it. Will be added to this driver, 1722 * in case any dma client requires it. 
1723 */ 1724 chan_err(d40c, "periph to periph not supported\n"); 1725 res = -EINVAL; 1726 } 1727 1728 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * 1729 conf->src_info.data_width != 1730 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * 1731 conf->dst_info.data_width) { 1732 /* 1733 * The DMAC hardware only supports 1734 * src (burst x width) == dst (burst x width) 1735 */ 1736 1737 chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); 1738 res = -EINVAL; 1739 } 1740 1741 return res; 1742 } 1743 1744 static bool d40_alloc_mask_set(struct d40_phy_res *phy, 1745 bool is_src, int log_event_line, bool is_log, 1746 bool *first_user) 1747 { 1748 unsigned long flags; 1749 spin_lock_irqsave(&phy->lock, flags); 1750 1751 *first_user = ((phy->allocated_src | phy->allocated_dst) 1752 == D40_ALLOC_FREE); 1753 1754 if (!is_log) { 1755 /* Physical interrupts are masked per physical full channel */ 1756 if (phy->allocated_src == D40_ALLOC_FREE && 1757 phy->allocated_dst == D40_ALLOC_FREE) { 1758 phy->allocated_dst = D40_ALLOC_PHY; 1759 phy->allocated_src = D40_ALLOC_PHY; 1760 goto found_unlock; 1761 } else 1762 goto not_found_unlock; 1763 } 1764 1765 /* Logical channel */ 1766 if (is_src) { 1767 if (phy->allocated_src == D40_ALLOC_PHY) 1768 goto not_found_unlock; 1769 1770 if (phy->allocated_src == D40_ALLOC_FREE) 1771 phy->allocated_src = D40_ALLOC_LOG_FREE; 1772 1773 if (!(phy->allocated_src & BIT(log_event_line))) { 1774 phy->allocated_src |= BIT(log_event_line); 1775 goto found_unlock; 1776 } else 1777 goto not_found_unlock; 1778 } else { 1779 if (phy->allocated_dst == D40_ALLOC_PHY) 1780 goto not_found_unlock; 1781 1782 if (phy->allocated_dst == D40_ALLOC_FREE) 1783 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1784 1785 if (!(phy->allocated_dst & BIT(log_event_line))) { 1786 phy->allocated_dst |= BIT(log_event_line); 1787 goto found_unlock; 1788 } 1789 } 1790 not_found_unlock: 1791 spin_unlock_irqrestore(&phy->lock, flags); 1792 return false; 1793 found_unlock: 1794 spin_unlock_irqrestore(&phy->lock, flags); 1795 return true; 1796 } 1797 1798 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, 1799 int log_event_line) 1800 { 1801 unsigned long flags; 1802 bool is_free = false; 1803 1804 spin_lock_irqsave(&phy->lock, flags); 1805 if (!log_event_line) { 1806 phy->allocated_dst = D40_ALLOC_FREE; 1807 phy->allocated_src = D40_ALLOC_FREE; 1808 is_free = true; 1809 goto unlock; 1810 } 1811 1812 /* Logical channel */ 1813 if (is_src) { 1814 phy->allocated_src &= ~BIT(log_event_line); 1815 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1816 phy->allocated_src = D40_ALLOC_FREE; 1817 } else { 1818 phy->allocated_dst &= ~BIT(log_event_line); 1819 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1820 phy->allocated_dst = D40_ALLOC_FREE; 1821 } 1822 1823 is_free = ((phy->allocated_src | phy->allocated_dst) == 1824 D40_ALLOC_FREE); 1825 unlock: 1826 spin_unlock_irqrestore(&phy->lock, flags); 1827 1828 return is_free; 1829 } 1830 1831 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) 1832 { 1833 int dev_type = d40c->dma_cfg.dev_type; 1834 int event_group; 1835 int event_line; 1836 struct d40_phy_res *phys; 1837 int i; 1838 int j; 1839 int log_num; 1840 int num_phy_chans; 1841 bool is_src; 1842 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; 1843 1844 phys = d40c->base->phy_res; 1845 num_phy_chans = d40c->base->num_phy_chans; 1846 1847 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 1848 log_num = 2 * dev_type; 1849 is_src = true; 1850 } else if 
(d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 1851 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1852 /* dst event lines are used for logical memcpy */ 1853 log_num = 2 * dev_type + 1; 1854 is_src = false; 1855 } else 1856 return -EINVAL; 1857 1858 event_group = D40_TYPE_TO_GROUP(dev_type); 1859 event_line = D40_TYPE_TO_EVENT(dev_type); 1860 1861 if (!is_log) { 1862 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1863 /* Find physical half channel */ 1864 if (d40c->dma_cfg.use_fixed_channel) { 1865 i = d40c->dma_cfg.phy_channel; 1866 if (d40_alloc_mask_set(&phys[i], is_src, 1867 0, is_log, 1868 first_phy_user)) 1869 goto found_phy; 1870 } else { 1871 for (i = 0; i < num_phy_chans; i++) { 1872 if (d40_alloc_mask_set(&phys[i], is_src, 1873 0, is_log, 1874 first_phy_user)) 1875 goto found_phy; 1876 } 1877 } 1878 } else 1879 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1880 int phy_num = j + event_group * 2; 1881 for (i = phy_num; i < phy_num + 2; i++) { 1882 if (d40_alloc_mask_set(&phys[i], 1883 is_src, 1884 0, 1885 is_log, 1886 first_phy_user)) 1887 goto found_phy; 1888 } 1889 } 1890 return -EINVAL; 1891 found_phy: 1892 d40c->phy_chan = &phys[i]; 1893 d40c->log_num = D40_PHY_CHAN; 1894 goto out; 1895 } 1896 if (dev_type == -1) 1897 return -EINVAL; 1898 1899 /* Find logical channel */ 1900 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1901 int phy_num = j + event_group * 2; 1902 1903 if (d40c->dma_cfg.use_fixed_channel) { 1904 i = d40c->dma_cfg.phy_channel; 1905 1906 if ((i != phy_num) && (i != phy_num + 1)) { 1907 dev_err(chan2dev(d40c), 1908 "invalid fixed phy channel %d\n", i); 1909 return -EINVAL; 1910 } 1911 1912 if (d40_alloc_mask_set(&phys[i], is_src, event_line, 1913 is_log, first_phy_user)) 1914 goto found_log; 1915 1916 dev_err(chan2dev(d40c), 1917 "could not allocate fixed phy channel %d\n", i); 1918 return -EINVAL; 1919 } 1920 1921 /* 1922 * Spread logical channels across all available physical rather 1923 * than pack every logical channel at the first available phy 1924 * channels. 1925 */ 1926 if (is_src) { 1927 for (i = phy_num; i < phy_num + 2; i++) { 1928 if (d40_alloc_mask_set(&phys[i], is_src, 1929 event_line, is_log, 1930 first_phy_user)) 1931 goto found_log; 1932 } 1933 } else { 1934 for (i = phy_num + 1; i >= phy_num; i--) { 1935 if (d40_alloc_mask_set(&phys[i], is_src, 1936 event_line, is_log, 1937 first_phy_user)) 1938 goto found_log; 1939 } 1940 } 1941 } 1942 return -EINVAL; 1943 1944 found_log: 1945 d40c->phy_chan = &phys[i]; 1946 d40c->log_num = log_num; 1947 out: 1948 1949 if (is_log) 1950 d40c->base->lookup_log_chans[d40c->log_num] = d40c; 1951 else 1952 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; 1953 1954 return 0; 1955 1956 } 1957 1958 static int d40_config_memcpy(struct d40_chan *d40c) 1959 { 1960 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 1961 1962 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 1963 d40c->dma_cfg = dma40_memcpy_conf_log; 1964 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; 1965 1966 d40_log_cfg(&d40c->dma_cfg, 1967 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1968 1969 } else if (dma_has_cap(DMA_MEMCPY, cap) && 1970 dma_has_cap(DMA_SLAVE, cap)) { 1971 d40c->dma_cfg = dma40_memcpy_conf_phy; 1972 1973 /* Generate interrrupt at end of transfer or relink. */ 1974 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); 1975 1976 /* Generate interrupt on error. 
*/ 1977 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); 1978 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); 1979 1980 } else { 1981 chan_err(d40c, "No memcpy\n"); 1982 return -EINVAL; 1983 } 1984 1985 return 0; 1986 } 1987 1988 static int d40_free_dma(struct d40_chan *d40c) 1989 { 1990 1991 int res = 0; 1992 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 1993 struct d40_phy_res *phy = d40c->phy_chan; 1994 bool is_src; 1995 1996 /* Terminate all queued and active transfers */ 1997 d40_term_all(d40c); 1998 1999 if (phy == NULL) { 2000 chan_err(d40c, "phy == null\n"); 2001 return -EINVAL; 2002 } 2003 2004 if (phy->allocated_src == D40_ALLOC_FREE && 2005 phy->allocated_dst == D40_ALLOC_FREE) { 2006 chan_err(d40c, "channel already free\n"); 2007 return -EINVAL; 2008 } 2009 2010 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2011 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) 2012 is_src = false; 2013 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2014 is_src = true; 2015 else { 2016 chan_err(d40c, "Unknown direction\n"); 2017 return -EINVAL; 2018 } 2019 2020 pm_runtime_get_sync(d40c->base->dev); 2021 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 2022 if (res) { 2023 chan_err(d40c, "stop failed\n"); 2024 goto mark_last_busy; 2025 } 2026 2027 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); 2028 2029 if (chan_is_logical(d40c)) 2030 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 2031 else 2032 d40c->base->lookup_phy_chans[phy->num] = NULL; 2033 2034 if (d40c->busy) { 2035 pm_runtime_mark_last_busy(d40c->base->dev); 2036 pm_runtime_put_autosuspend(d40c->base->dev); 2037 } 2038 2039 d40c->busy = false; 2040 d40c->phy_chan = NULL; 2041 d40c->configured = false; 2042 mark_last_busy: 2043 pm_runtime_mark_last_busy(d40c->base->dev); 2044 pm_runtime_put_autosuspend(d40c->base->dev); 2045 return res; 2046 } 2047 2048 static bool d40_is_paused(struct d40_chan *d40c) 2049 { 2050 void __iomem *chanbase = chan_base(d40c); 2051 bool is_paused = false; 2052 unsigned long flags; 2053 void __iomem *active_reg; 2054 u32 status; 2055 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 2056 2057 spin_lock_irqsave(&d40c->lock, flags); 2058 2059 if (chan_is_physical(d40c)) { 2060 if (d40c->phy_chan->num % 2 == 0) 2061 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 2062 else 2063 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 2064 2065 status = (readl(active_reg) & 2066 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 2067 D40_CHAN_POS(d40c->phy_chan->num); 2068 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 2069 is_paused = true; 2070 goto unlock; 2071 } 2072 2073 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2074 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 2075 status = readl(chanbase + D40_CHAN_REG_SDLNK); 2076 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 2077 status = readl(chanbase + D40_CHAN_REG_SSLNK); 2078 } else { 2079 chan_err(d40c, "Unknown direction\n"); 2080 goto unlock; 2081 } 2082 2083 status = (status & D40_EVENTLINE_MASK(event)) >> 2084 D40_EVENTLINE_POS(event); 2085 2086 if (status != D40_DMA_RUN) 2087 is_paused = true; 2088 unlock: 2089 spin_unlock_irqrestore(&d40c->lock, flags); 2090 return is_paused; 2091 2092 } 2093 2094 static u32 stedma40_residue(struct dma_chan *chan) 2095 { 2096 struct d40_chan *d40c = 2097 container_of(chan, struct d40_chan, chan); 2098 u32 bytes_left; 2099 unsigned long flags; 2100 2101 spin_lock_irqsave(&d40c->lock, flags); 2102 bytes_left = d40_residue(d40c); 2103 spin_unlock_irqrestore(&d40c->lock, flags); 2104 2105 return 
bytes_left; 2106 } 2107 2108 static int 2109 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, 2110 struct scatterlist *sg_src, struct scatterlist *sg_dst, 2111 unsigned int sg_len, dma_addr_t src_dev_addr, 2112 dma_addr_t dst_dev_addr) 2113 { 2114 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2115 struct stedma40_half_channel_info *src_info = &cfg->src_info; 2116 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 2117 int ret; 2118 2119 ret = d40_log_sg_to_lli(sg_src, sg_len, 2120 src_dev_addr, 2121 desc->lli_log.src, 2122 chan->log_def.lcsp1, 2123 src_info->data_width, 2124 dst_info->data_width); 2125 2126 ret = d40_log_sg_to_lli(sg_dst, sg_len, 2127 dst_dev_addr, 2128 desc->lli_log.dst, 2129 chan->log_def.lcsp3, 2130 dst_info->data_width, 2131 src_info->data_width); 2132 2133 return ret < 0 ? ret : 0; 2134 } 2135 2136 static int 2137 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, 2138 struct scatterlist *sg_src, struct scatterlist *sg_dst, 2139 unsigned int sg_len, dma_addr_t src_dev_addr, 2140 dma_addr_t dst_dev_addr) 2141 { 2142 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2143 struct stedma40_half_channel_info *src_info = &cfg->src_info; 2144 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 2145 unsigned long flags = 0; 2146 int ret; 2147 2148 if (desc->cyclic) 2149 flags |= LLI_CYCLIC | LLI_TERM_INT; 2150 2151 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, 2152 desc->lli_phy.src, 2153 virt_to_phys(desc->lli_phy.src), 2154 chan->src_def_cfg, 2155 src_info, dst_info, flags); 2156 2157 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, 2158 desc->lli_phy.dst, 2159 virt_to_phys(desc->lli_phy.dst), 2160 chan->dst_def_cfg, 2161 dst_info, src_info, flags); 2162 2163 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, 2164 desc->lli_pool.size, DMA_TO_DEVICE); 2165 2166 return ret < 0 ? 
ret : 0; 2167 } 2168 2169 static struct d40_desc * 2170 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, 2171 unsigned int sg_len, unsigned long dma_flags) 2172 { 2173 struct stedma40_chan_cfg *cfg; 2174 struct d40_desc *desc; 2175 int ret; 2176 2177 desc = d40_desc_get(chan); 2178 if (!desc) 2179 return NULL; 2180 2181 cfg = &chan->dma_cfg; 2182 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, 2183 cfg->dst_info.data_width); 2184 if (desc->lli_len < 0) { 2185 chan_err(chan, "Unaligned size\n"); 2186 goto free_desc; 2187 } 2188 2189 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); 2190 if (ret < 0) { 2191 chan_err(chan, "Could not allocate lli\n"); 2192 goto free_desc; 2193 } 2194 2195 desc->lli_current = 0; 2196 desc->txd.flags = dma_flags; 2197 desc->txd.tx_submit = d40_tx_submit; 2198 2199 dma_async_tx_descriptor_init(&desc->txd, &chan->chan); 2200 2201 return desc; 2202 free_desc: 2203 d40_desc_free(chan, desc); 2204 return NULL; 2205 } 2206 2207 static struct dma_async_tx_descriptor * 2208 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 2209 struct scatterlist *sg_dst, unsigned int sg_len, 2210 enum dma_transfer_direction direction, unsigned long dma_flags) 2211 { 2212 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); 2213 dma_addr_t src_dev_addr; 2214 dma_addr_t dst_dev_addr; 2215 struct d40_desc *desc; 2216 unsigned long flags; 2217 int ret; 2218 2219 if (!chan->phy_chan) { 2220 chan_err(chan, "Cannot prepare unallocated channel\n"); 2221 return NULL; 2222 } 2223 2224 d40_set_runtime_config_write(dchan, &chan->slave_config, direction); 2225 2226 spin_lock_irqsave(&chan->lock, flags); 2227 2228 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); 2229 if (desc == NULL) 2230 goto unlock; 2231 2232 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2233 desc->cyclic = true; 2234 2235 src_dev_addr = 0; 2236 dst_dev_addr = 0; 2237 if (direction == DMA_DEV_TO_MEM) 2238 src_dev_addr = chan->runtime_addr; 2239 else if (direction == DMA_MEM_TO_DEV) 2240 dst_dev_addr = chan->runtime_addr; 2241 2242 if (chan_is_logical(chan)) 2243 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, 2244 sg_len, src_dev_addr, dst_dev_addr); 2245 else 2246 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, 2247 sg_len, src_dev_addr, dst_dev_addr); 2248 2249 if (ret) { 2250 chan_err(chan, "Failed to prepare %s sg job: %d\n", 2251 chan_is_logical(chan) ? 
"log" : "phy", ret); 2252 goto free_desc; 2253 } 2254 2255 /* 2256 * add descriptor to the prepare queue in order to be able 2257 * to free them later in terminate_all 2258 */ 2259 list_add_tail(&desc->node, &chan->prepare_queue); 2260 2261 spin_unlock_irqrestore(&chan->lock, flags); 2262 2263 return &desc->txd; 2264 free_desc: 2265 d40_desc_free(chan, desc); 2266 unlock: 2267 spin_unlock_irqrestore(&chan->lock, flags); 2268 return NULL; 2269 } 2270 2271 bool stedma40_filter(struct dma_chan *chan, void *data) 2272 { 2273 struct stedma40_chan_cfg *info = data; 2274 struct d40_chan *d40c = 2275 container_of(chan, struct d40_chan, chan); 2276 int err; 2277 2278 if (data) { 2279 err = d40_validate_conf(d40c, info); 2280 if (!err) 2281 d40c->dma_cfg = *info; 2282 } else 2283 err = d40_config_memcpy(d40c); 2284 2285 if (!err) 2286 d40c->configured = true; 2287 2288 return err == 0; 2289 } 2290 EXPORT_SYMBOL(stedma40_filter); 2291 2292 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) 2293 { 2294 bool realtime = d40c->dma_cfg.realtime; 2295 bool highprio = d40c->dma_cfg.high_priority; 2296 u32 rtreg; 2297 u32 event = D40_TYPE_TO_EVENT(dev_type); 2298 u32 group = D40_TYPE_TO_GROUP(dev_type); 2299 u32 bit = BIT(event); 2300 u32 prioreg; 2301 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; 2302 2303 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; 2304 /* 2305 * Due to a hardware bug, in some cases a logical channel triggered by 2306 * a high priority destination event line can generate extra packet 2307 * transactions. 2308 * 2309 * The workaround is to not set the high priority level for the 2310 * destination event lines that trigger logical channels. 2311 */ 2312 if (!src && chan_is_logical(d40c)) 2313 highprio = false; 2314 2315 prioreg = highprio ? 
dmac->high_prio_en : dmac->high_prio_clear; 2316 2317 /* Destination event lines are stored in the upper halfword */ 2318 if (!src) 2319 bit <<= 16; 2320 2321 writel(bit, d40c->base->virtbase + prioreg + group * 4); 2322 writel(bit, d40c->base->virtbase + rtreg + group * 4); 2323 } 2324 2325 static void d40_set_prio_realtime(struct d40_chan *d40c) 2326 { 2327 if (d40c->base->rev < 3) 2328 return; 2329 2330 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 2331 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2332 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); 2333 2334 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || 2335 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2336 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); 2337 } 2338 2339 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) 2340 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) 2341 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) 2342 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) 2343 #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1) 2344 2345 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, 2346 struct of_dma *ofdma) 2347 { 2348 struct stedma40_chan_cfg cfg; 2349 dma_cap_mask_t cap; 2350 u32 flags; 2351 2352 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); 2353 2354 dma_cap_zero(cap); 2355 dma_cap_set(DMA_SLAVE, cap); 2356 2357 cfg.dev_type = dma_spec->args[0]; 2358 flags = dma_spec->args[2]; 2359 2360 switch (D40_DT_FLAGS_MODE(flags)) { 2361 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; 2362 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; 2363 } 2364 2365 switch (D40_DT_FLAGS_DIR(flags)) { 2366 case 0: 2367 cfg.dir = DMA_MEM_TO_DEV; 2368 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2369 break; 2370 case 1: 2371 cfg.dir = DMA_DEV_TO_MEM; 2372 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2373 break; 2374 } 2375 2376 if (D40_DT_FLAGS_FIXED_CHAN(flags)) { 2377 cfg.phy_channel = dma_spec->args[1]; 2378 cfg.use_fixed_channel = true; 2379 } 2380 2381 if (D40_DT_FLAGS_HIGH_PRIO(flags)) 2382 cfg.high_priority = true; 2383 2384 return dma_request_channel(cap, stedma40_filter, &cfg); 2385 } 2386 2387 /* DMA ENGINE functions */ 2388 static int d40_alloc_chan_resources(struct dma_chan *chan) 2389 { 2390 int err; 2391 unsigned long flags; 2392 struct d40_chan *d40c = 2393 container_of(chan, struct d40_chan, chan); 2394 bool is_free_phy; 2395 spin_lock_irqsave(&d40c->lock, flags); 2396 2397 dma_cookie_init(chan); 2398 2399 /* If no dma configuration is set use default configuration (memcpy) */ 2400 if (!d40c->configured) { 2401 err = d40_config_memcpy(d40c); 2402 if (err) { 2403 chan_err(d40c, "Failed to configure memcpy channel\n"); 2404 goto mark_last_busy; 2405 } 2406 } 2407 2408 err = d40_allocate_channel(d40c, &is_free_phy); 2409 if (err) { 2410 chan_err(d40c, "Failed to allocate channel\n"); 2411 d40c->configured = false; 2412 goto mark_last_busy; 2413 } 2414 2415 pm_runtime_get_sync(d40c->base->dev); 2416 2417 d40_set_prio_realtime(d40c); 2418 2419 if (chan_is_logical(d40c)) { 2420 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2421 d40c->lcpa = d40c->base->lcpa_base + 2422 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; 2423 else 2424 d40c->lcpa = d40c->base->lcpa_base + 2425 d40c->dma_cfg.dev_type * 2426 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 2427 2428 /* Unmask the Global Interrupt Mask. 
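The GIM bit is set in both the source and destination default configuration so that events on this logical channel are allowed to generate interrupts on the shared physical channel.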
*/ 2429 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); 2430 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); 2431 } 2432 2433 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", 2434 chan_is_logical(d40c) ? "logical" : "physical", 2435 d40c->phy_chan->num, 2436 d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); 2437 2438 2439 /* 2440 * Only write channel configuration to the DMA if the physical 2441 * resource is free. In case of multiple logical channels 2442 * on the same physical resource, only the first write is necessary. 2443 */ 2444 if (is_free_phy) 2445 d40_config_write(d40c); 2446 mark_last_busy: 2447 pm_runtime_mark_last_busy(d40c->base->dev); 2448 pm_runtime_put_autosuspend(d40c->base->dev); 2449 spin_unlock_irqrestore(&d40c->lock, flags); 2450 return err; 2451 } 2452 2453 static void d40_free_chan_resources(struct dma_chan *chan) 2454 { 2455 struct d40_chan *d40c = 2456 container_of(chan, struct d40_chan, chan); 2457 int err; 2458 unsigned long flags; 2459 2460 if (d40c->phy_chan == NULL) { 2461 chan_err(d40c, "Cannot free unallocated channel\n"); 2462 return; 2463 } 2464 2465 spin_lock_irqsave(&d40c->lock, flags); 2466 2467 err = d40_free_dma(d40c); 2468 2469 if (err) 2470 chan_err(d40c, "Failed to free channel\n"); 2471 spin_unlock_irqrestore(&d40c->lock, flags); 2472 } 2473 2474 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 2475 dma_addr_t dst, 2476 dma_addr_t src, 2477 size_t size, 2478 unsigned long dma_flags) 2479 { 2480 struct scatterlist dst_sg; 2481 struct scatterlist src_sg; 2482 2483 sg_init_table(&dst_sg, 1); 2484 sg_init_table(&src_sg, 1); 2485 2486 sg_dma_address(&dst_sg) = dst; 2487 sg_dma_address(&src_sg) = src; 2488 2489 sg_dma_len(&dst_sg) = size; 2490 sg_dma_len(&src_sg) = size; 2491 2492 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, 2493 DMA_MEM_TO_MEM, dma_flags); 2494 } 2495 2496 static struct dma_async_tx_descriptor * 2497 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 2498 unsigned int sg_len, enum dma_transfer_direction direction, 2499 unsigned long dma_flags, void *context) 2500 { 2501 if (!is_slave_direction(direction)) 2502 return NULL; 2503 2504 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); 2505 } 2506 2507 static struct dma_async_tx_descriptor * 2508 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2509 size_t buf_len, size_t period_len, 2510 enum dma_transfer_direction direction, unsigned long flags) 2511 { 2512 unsigned int periods = buf_len / period_len; 2513 struct dma_async_tx_descriptor *txd; 2514 struct scatterlist *sg; 2515 int i; 2516 2517 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); 2518 if (!sg) 2519 return NULL; 2520 2521 for (i = 0; i < periods; i++) { 2522 sg_dma_address(&sg[i]) = dma_addr; 2523 sg_dma_len(&sg[i]) = period_len; 2524 dma_addr += period_len; 2525 } 2526 2527 sg_chain(sg, periods + 1, sg); 2528 2529 txd = d40_prep_sg(chan, sg, sg, periods, direction, 2530 DMA_PREP_INTERRUPT); 2531 2532 kfree(sg); 2533 2534 return txd; 2535 } 2536 2537 static enum dma_status d40_tx_status(struct dma_chan *chan, 2538 dma_cookie_t cookie, 2539 struct dma_tx_state *txstate) 2540 { 2541 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2542 enum dma_status ret; 2543 2544 if (d40c->phy_chan == NULL) { 2545 chan_err(d40c, "Cannot read status of unallocated channel\n"); 2546 return -EINVAL; 2547 } 2548 2549 ret = dma_cookie_status(chan, cookie, txstate); 2550 if (ret != DMA_COMPLETE && txstate) 2551 
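/* Remaining bytes are reported with burst granularity; see residue_granularity in d40_ops_init(). */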
dma_set_residue(txstate, stedma40_residue(chan)); 2552 2553 if (d40_is_paused(d40c)) 2554 ret = DMA_PAUSED; 2555 2556 return ret; 2557 } 2558 2559 static void d40_issue_pending(struct dma_chan *chan) 2560 { 2561 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2562 unsigned long flags; 2563 2564 if (d40c->phy_chan == NULL) { 2565 chan_err(d40c, "Channel is not allocated!\n"); 2566 return; 2567 } 2568 2569 spin_lock_irqsave(&d40c->lock, flags); 2570 2571 list_splice_tail_init(&d40c->pending_queue, &d40c->queue); 2572 2573 /* Busy means that queued jobs are already being processed */ 2574 if (!d40c->busy) 2575 (void) d40_queue_start(d40c); 2576 2577 spin_unlock_irqrestore(&d40c->lock, flags); 2578 } 2579 2580 static int d40_terminate_all(struct dma_chan *chan) 2581 { 2582 unsigned long flags; 2583 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2584 int ret; 2585 2586 if (d40c->phy_chan == NULL) { 2587 chan_err(d40c, "Channel is not allocated!\n"); 2588 return -EINVAL; 2589 } 2590 2591 spin_lock_irqsave(&d40c->lock, flags); 2592 2593 pm_runtime_get_sync(d40c->base->dev); 2594 ret = d40_channel_execute_command(d40c, D40_DMA_STOP); 2595 if (ret) 2596 chan_err(d40c, "Failed to stop channel\n"); 2597 2598 d40_term_all(d40c); 2599 pm_runtime_mark_last_busy(d40c->base->dev); 2600 pm_runtime_put_autosuspend(d40c->base->dev); 2601 if (d40c->busy) { 2602 pm_runtime_mark_last_busy(d40c->base->dev); 2603 pm_runtime_put_autosuspend(d40c->base->dev); 2604 } 2605 d40c->busy = false; 2606 2607 spin_unlock_irqrestore(&d40c->lock, flags); 2608 return 0; 2609 } 2610 2611 static int 2612 dma40_config_to_halfchannel(struct d40_chan *d40c, 2613 struct stedma40_half_channel_info *info, 2614 u32 maxburst) 2615 { 2616 int psize; 2617 2618 if (chan_is_logical(d40c)) { 2619 if (maxburst >= 16) 2620 psize = STEDMA40_PSIZE_LOG_16; 2621 else if (maxburst >= 8) 2622 psize = STEDMA40_PSIZE_LOG_8; 2623 else if (maxburst >= 4) 2624 psize = STEDMA40_PSIZE_LOG_4; 2625 else 2626 psize = STEDMA40_PSIZE_LOG_1; 2627 } else { 2628 if (maxburst >= 16) 2629 psize = STEDMA40_PSIZE_PHY_16; 2630 else if (maxburst >= 8) 2631 psize = STEDMA40_PSIZE_PHY_8; 2632 else if (maxburst >= 4) 2633 psize = STEDMA40_PSIZE_PHY_4; 2634 else 2635 psize = STEDMA40_PSIZE_PHY_1; 2636 } 2637 2638 info->psize = psize; 2639 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2640 2641 return 0; 2642 } 2643 2644 static int d40_set_runtime_config(struct dma_chan *chan, 2645 struct dma_slave_config *config) 2646 { 2647 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2648 2649 memcpy(&d40c->slave_config, config, sizeof(*config)); 2650 2651 return 0; 2652 } 2653 2654 /* Runtime reconfiguration extension */ 2655 static int d40_set_runtime_config_write(struct dma_chan *chan, 2656 struct dma_slave_config *config, 2657 enum dma_transfer_direction direction) 2658 { 2659 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2660 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; 2661 enum dma_slave_buswidth src_addr_width, dst_addr_width; 2662 dma_addr_t config_addr; 2663 u32 src_maxburst, dst_maxburst; 2664 int ret; 2665 2666 if (d40c->phy_chan == NULL) { 2667 chan_err(d40c, "Channel is not allocated!\n"); 2668 return -EINVAL; 2669 } 2670 2671 src_addr_width = config->src_addr_width; 2672 src_maxburst = config->src_maxburst; 2673 dst_addr_width = config->dst_addr_width; 2674 dst_maxburst = config->dst_maxburst; 2675 2676 if (direction == DMA_DEV_TO_MEM) { 2677 config_addr = config->src_addr; 2678 2679 if 
(cfg->dir != DMA_DEV_TO_MEM) 2680 dev_dbg(d40c->base->dev, 2681 "channel was not configured for peripheral " 2682 "to memory transfer (%d) overriding\n", 2683 cfg->dir); 2684 cfg->dir = DMA_DEV_TO_MEM; 2685 2686 /* Configure the memory side */ 2687 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2688 dst_addr_width = src_addr_width; 2689 if (dst_maxburst == 0) 2690 dst_maxburst = src_maxburst; 2691 2692 } else if (direction == DMA_MEM_TO_DEV) { 2693 config_addr = config->dst_addr; 2694 2695 if (cfg->dir != DMA_MEM_TO_DEV) 2696 dev_dbg(d40c->base->dev, 2697 "channel was not configured for memory " 2698 "to peripheral transfer (%d) overriding\n", 2699 cfg->dir); 2700 cfg->dir = DMA_MEM_TO_DEV; 2701 2702 /* Configure the memory side */ 2703 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2704 src_addr_width = dst_addr_width; 2705 if (src_maxburst == 0) 2706 src_maxburst = dst_maxburst; 2707 } else { 2708 dev_err(d40c->base->dev, 2709 "unrecognized channel direction %d\n", 2710 direction); 2711 return -EINVAL; 2712 } 2713 2714 if (config_addr <= 0) { 2715 dev_err(d40c->base->dev, "no address supplied\n"); 2716 return -EINVAL; 2717 } 2718 2719 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { 2720 dev_err(d40c->base->dev, 2721 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", 2722 src_maxburst, 2723 src_addr_width, 2724 dst_maxburst, 2725 dst_addr_width); 2726 return -EINVAL; 2727 } 2728 2729 if (src_maxburst > 16) { 2730 src_maxburst = 16; 2731 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; 2732 } else if (dst_maxburst > 16) { 2733 dst_maxburst = 16; 2734 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; 2735 } 2736 2737 /* Only valid widths are; 1, 2, 4 and 8. */ 2738 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2739 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2740 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2741 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2742 !is_power_of_2(src_addr_width) || 2743 !is_power_of_2(dst_addr_width)) 2744 return -EINVAL; 2745 2746 cfg->src_info.data_width = src_addr_width; 2747 cfg->dst_info.data_width = dst_addr_width; 2748 2749 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, 2750 src_maxburst); 2751 if (ret) 2752 return ret; 2753 2754 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, 2755 dst_maxburst); 2756 if (ret) 2757 return ret; 2758 2759 /* Fill in register values */ 2760 if (chan_is_logical(d40c)) 2761 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2762 else 2763 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); 2764 2765 /* These settings will take precedence later */ 2766 d40c->runtime_addr = config_addr; 2767 d40c->runtime_direction = direction; 2768 dev_dbg(d40c->base->dev, 2769 "configured channel %s for %s, data width %d/%d, " 2770 "maxburst %d/%d elements, LE, no flow control\n", 2771 dma_chan_name(chan), 2772 (direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", 2773 src_addr_width, dst_addr_width, 2774 src_maxburst, dst_maxburst); 2775 2776 return 0; 2777 } 2778 2779 /* Initialization functions */ 2780 2781 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2782 struct d40_chan *chans, int offset, 2783 int num_chans) 2784 { 2785 int i = 0; 2786 struct d40_chan *d40c; 2787 2788 INIT_LIST_HEAD(&dma->channels); 2789 2790 for (i = offset; i < offset + num_chans; i++) { 2791 d40c = &chans[i]; 2792 d40c->base = base; 2793 d40c->chan.device = dma; 2794 2795 spin_lock_init(&d40c->lock); 2796 2797 d40c->log_num = D40_PHY_CHAN; 2798 2799 INIT_LIST_HEAD(&d40c->done); 2800 INIT_LIST_HEAD(&d40c->active); 2801 INIT_LIST_HEAD(&d40c->queue); 2802 INIT_LIST_HEAD(&d40c->pending_queue); 2803 INIT_LIST_HEAD(&d40c->client); 2804 INIT_LIST_HEAD(&d40c->prepare_queue); 2805 2806 tasklet_setup(&d40c->tasklet, dma_tasklet); 2807 2808 list_add_tail(&d40c->chan.device_node, 2809 &dma->channels); 2810 } 2811 } 2812 2813 static void d40_ops_init(struct d40_base *base, struct dma_device *dev) 2814 { 2815 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) { 2816 dev->device_prep_slave_sg = d40_prep_slave_sg; 2817 dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 2818 } 2819 2820 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { 2821 dev->device_prep_dma_memcpy = d40_prep_memcpy; 2822 dev->directions = BIT(DMA_MEM_TO_MEM); 2823 /* 2824 * This controller can only access address at even 2825 * 32bit boundaries, i.e. 2^2 2826 */ 2827 dev->copy_align = DMAENGINE_ALIGN_4_BYTES; 2828 } 2829 2830 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) 2831 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; 2832 2833 dev->device_alloc_chan_resources = d40_alloc_chan_resources; 2834 dev->device_free_chan_resources = d40_free_chan_resources; 2835 dev->device_issue_pending = d40_issue_pending; 2836 dev->device_tx_status = d40_tx_status; 2837 dev->device_config = d40_set_runtime_config; 2838 dev->device_pause = d40_pause; 2839 dev->device_resume = d40_resume; 2840 dev->device_terminate_all = d40_terminate_all; 2841 dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 2842 dev->dev = base->dev; 2843 } 2844 2845 static int __init d40_dmaengine_init(struct d40_base *base, 2846 int num_reserved_chans) 2847 { 2848 int err ; 2849 2850 d40_chan_init(base, &base->dma_slave, base->log_chans, 2851 0, base->num_log_chans); 2852 2853 dma_cap_zero(base->dma_slave.cap_mask); 2854 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2855 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2856 2857 d40_ops_init(base, &base->dma_slave); 2858 2859 err = dmaenginem_async_device_register(&base->dma_slave); 2860 2861 if (err) { 2862 d40_err(base->dev, "Failed to register slave channels\n"); 2863 goto exit; 2864 } 2865 2866 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2867 base->num_log_chans, base->num_memcpy_chans); 2868 2869 dma_cap_zero(base->dma_memcpy.cap_mask); 2870 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2871 2872 d40_ops_init(base, &base->dma_memcpy); 2873 2874 err = dmaenginem_async_device_register(&base->dma_memcpy); 2875 2876 if (err) { 2877 d40_err(base->dev, 2878 "Failed to register memcpy only channels\n"); 2879 goto exit; 2880 } 2881 2882 d40_chan_init(base, &base->dma_both, base->phy_chans, 2883 0, num_reserved_chans); 2884 2885 dma_cap_zero(base->dma_both.cap_mask); 2886 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2887 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2888 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2889 2890 
d40_ops_init(base, &base->dma_both); 2891 err = dmaenginem_async_device_register(&base->dma_both); 2892 2893 if (err) { 2894 d40_err(base->dev, 2895 "Failed to register logical and physical capable channels\n"); 2896 goto exit; 2897 } 2898 return 0; 2899 exit: 2900 return err; 2901 } 2902 2903 /* Suspend resume functionality */ 2904 #ifdef CONFIG_PM_SLEEP 2905 static int dma40_suspend(struct device *dev) 2906 { 2907 struct d40_base *base = dev_get_drvdata(dev); 2908 int ret; 2909 2910 ret = pm_runtime_force_suspend(dev); 2911 if (ret) 2912 return ret; 2913 2914 if (base->lcpa_regulator) 2915 ret = regulator_disable(base->lcpa_regulator); 2916 return ret; 2917 } 2918 2919 static int dma40_resume(struct device *dev) 2920 { 2921 struct d40_base *base = dev_get_drvdata(dev); 2922 int ret = 0; 2923 2924 if (base->lcpa_regulator) { 2925 ret = regulator_enable(base->lcpa_regulator); 2926 if (ret) 2927 return ret; 2928 } 2929 2930 return pm_runtime_force_resume(dev); 2931 } 2932 #endif 2933 2934 #ifdef CONFIG_PM 2935 static void dma40_backup(void __iomem *baseaddr, u32 *backup, 2936 u32 *regaddr, int num, bool save) 2937 { 2938 int i; 2939 2940 for (i = 0; i < num; i++) { 2941 void __iomem *addr = baseaddr + regaddr[i]; 2942 2943 if (save) 2944 backup[i] = readl_relaxed(addr); 2945 else 2946 writel_relaxed(backup[i], addr); 2947 } 2948 } 2949 2950 static void d40_save_restore_registers(struct d40_base *base, bool save) 2951 { 2952 int i; 2953 2954 /* Save/Restore channel specific registers */ 2955 for (i = 0; i < base->num_phy_chans; i++) { 2956 void __iomem *addr; 2957 int idx; 2958 2959 if (base->phy_res[i].reserved) 2960 continue; 2961 2962 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; 2963 idx = i * ARRAY_SIZE(d40_backup_regs_chan); 2964 2965 dma40_backup(addr, &base->reg_val_backup_chan[idx], 2966 d40_backup_regs_chan, 2967 ARRAY_SIZE(d40_backup_regs_chan), 2968 save); 2969 } 2970 2971 /* Save/Restore global registers */ 2972 dma40_backup(base->virtbase, base->reg_val_backup, 2973 d40_backup_regs, ARRAY_SIZE(d40_backup_regs), 2974 save); 2975 2976 /* Save/Restore registers only existing on dma40 v3 and later */ 2977 if (base->gen_dmac.backup) 2978 dma40_backup(base->virtbase, base->reg_val_backup_v4, 2979 base->gen_dmac.backup, 2980 base->gen_dmac.backup_size, 2981 save); 2982 } 2983 2984 static int dma40_runtime_suspend(struct device *dev) 2985 { 2986 struct d40_base *base = dev_get_drvdata(dev); 2987 2988 d40_save_restore_registers(base, true); 2989 2990 /* Don't disable/enable clocks for v1 due to HW bugs */ 2991 if (base->rev != 1) 2992 writel_relaxed(base->gcc_pwr_off_mask, 2993 base->virtbase + D40_DREG_GCC); 2994 2995 return 0; 2996 } 2997 2998 static int dma40_runtime_resume(struct device *dev) 2999 { 3000 struct d40_base *base = dev_get_drvdata(dev); 3001 3002 d40_save_restore_registers(base, false); 3003 3004 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, 3005 base->virtbase + D40_DREG_GCC); 3006 return 0; 3007 } 3008 #endif 3009 3010 static const struct dev_pm_ops dma40_pm_ops = { 3011 SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume) 3012 SET_RUNTIME_PM_OPS(dma40_runtime_suspend, 3013 dma40_runtime_resume, 3014 NULL) 3015 }; 3016 3017 /* Initialization functions. 
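Probe path overview: d40_hw_detect_init() maps the controller and checks the PrimeCell ID, d40_phy_res_init() marks secure and disabled channels as reserved, the LCPA/LCLA areas are set up, and d40_dmaengine_init() plus d40_hw_init() register the dmaengine devices and program the default channel setup.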
*/
3018
3019 static int __init d40_phy_res_init(struct d40_base *base)
3020 {
3021 int i;
3022 int num_phy_chans_avail = 0;
3023 u32 val[2];
3024 int odd_even_bit = -2;
3025 int gcc = D40_DREG_GCC_ENA;
3026
3027 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3028 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3029
3030 for (i = 0; i < base->num_phy_chans; i++) {
3031 base->phy_res[i].num = i;
3032 odd_even_bit += 2 * ((i % 2) == 0);
3033 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3034 /* Mark security only channels as occupied */
3035 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3036 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3037 base->phy_res[i].reserved = true;
3038 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3039 D40_DREG_GCC_SRC);
3040 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3041 D40_DREG_GCC_DST);
3042
3043
3044 } else {
3045 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3046 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3047 base->phy_res[i].reserved = false;
3048 num_phy_chans_avail++;
3049 }
3050 spin_lock_init(&base->phy_res[i].lock);
3051 }
3052
3053 /* Mark disabled channels as occupied */
3054 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3055 int chan = base->plat_data->disabled_channels[i];
3056
3057 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3058 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3059 base->phy_res[chan].reserved = true;
3060 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3061 D40_DREG_GCC_SRC);
3062 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3063 D40_DREG_GCC_DST);
3064 num_phy_chans_avail--;
3065 }
3066
3067 /* Mark soft_lli channels */
3068 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3069 int chan = base->plat_data->soft_lli_chans[i];
3070
3071 base->phy_res[chan].use_soft_lli = true;
3072 }
3073
3074 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3075 num_phy_chans_avail, base->num_phy_chans);
3076
3077 /* Verify settings extended vs standard */
3078 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3079
3080 for (i = 0; i < base->num_phy_chans; i++) {
3081
3082 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3083 (val[0] & 0x3) != 1)
3084 dev_info(base->dev,
3085 "[%s] INFO: channel %d is misconfigured (%d)\n",
3086 __func__, i, val[0] & 0x3);
3087
3088 val[0] = val[0] >> 2;
3089 }
3090
3091 /*
3092 * To keep things simple, enable all clocks initially.
3093 * The clocks will be managed later, after channel allocation.
3094 * The clocks for the event lines on which reserved channels exist
3095 * are not managed here.
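* The mask built up in 'gcc' is saved as gcc_pwr_off_mask and written back
* by dma40_runtime_suspend(), so the clocks needed by reserved channels
* stay on while the controller is runtime suspended.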
3096 */ 3097 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); 3098 base->gcc_pwr_off_mask = gcc; 3099 3100 return num_phy_chans_avail; 3101 } 3102 3103 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 3104 { 3105 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); 3106 struct clk *clk; 3107 void __iomem *virtbase; 3108 struct resource *res; 3109 struct d40_base *base; 3110 int num_log_chans; 3111 int num_phy_chans; 3112 int num_memcpy_chans; 3113 int clk_ret = -EINVAL; 3114 int i; 3115 u32 pid; 3116 u32 cid; 3117 u8 rev; 3118 3119 clk = clk_get(&pdev->dev, NULL); 3120 if (IS_ERR(clk)) { 3121 d40_err(&pdev->dev, "No matching clock found\n"); 3122 goto check_prepare_enabled; 3123 } 3124 3125 clk_ret = clk_prepare_enable(clk); 3126 if (clk_ret) { 3127 d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); 3128 goto disable_unprepare; 3129 } 3130 3131 /* Get IO for DMAC base address */ 3132 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 3133 if (!res) 3134 goto disable_unprepare; 3135 3136 if (request_mem_region(res->start, resource_size(res), 3137 D40_NAME " I/O base") == NULL) 3138 goto release_region; 3139 3140 virtbase = ioremap(res->start, resource_size(res)); 3141 if (!virtbase) 3142 goto release_region; 3143 3144 /* This is just a regular AMBA PrimeCell ID actually */ 3145 for (pid = 0, i = 0; i < 4; i++) 3146 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) 3147 & 255) << (i * 8); 3148 for (cid = 0, i = 0; i < 4; i++) 3149 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) 3150 & 255) << (i * 8); 3151 3152 if (cid != AMBA_CID) { 3153 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); 3154 goto unmap_io; 3155 } 3156 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { 3157 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", 3158 AMBA_MANF_BITS(pid), 3159 AMBA_VENDOR_ST); 3160 goto unmap_io; 3161 } 3162 /* 3163 * HW revision: 3164 * DB8500ed has revision 0 3165 * ? 
has revision 1 3166 * DB8500v1 has revision 2 3167 * DB8500v2 has revision 3 3168 * AP9540v1 has revision 4 3169 * DB8540v1 has revision 4 3170 */ 3171 rev = AMBA_REV_BITS(pid); 3172 if (rev < 2) { 3173 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); 3174 goto unmap_io; 3175 } 3176 3177 /* The number of physical channels on this HW */ 3178 if (plat_data->num_of_phy_chans) 3179 num_phy_chans = plat_data->num_of_phy_chans; 3180 else 3181 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 3182 3183 /* The number of channels used for memcpy */ 3184 if (plat_data->num_of_memcpy_chans) 3185 num_memcpy_chans = plat_data->num_of_memcpy_chans; 3186 else 3187 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); 3188 3189 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; 3190 3191 dev_info(&pdev->dev, 3192 "hardware rev: %d @ %pa with %d physical and %d logical channels\n", 3193 rev, &res->start, num_phy_chans, num_log_chans); 3194 3195 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 3196 (num_phy_chans + num_log_chans + num_memcpy_chans) * 3197 sizeof(struct d40_chan), GFP_KERNEL); 3198 3199 if (base == NULL) 3200 goto unmap_io; 3201 3202 base->rev = rev; 3203 base->clk = clk; 3204 base->num_memcpy_chans = num_memcpy_chans; 3205 base->num_phy_chans = num_phy_chans; 3206 base->num_log_chans = num_log_chans; 3207 base->phy_start = res->start; 3208 base->phy_size = resource_size(res); 3209 base->virtbase = virtbase; 3210 base->plat_data = plat_data; 3211 base->dev = &pdev->dev; 3212 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); 3213 base->log_chans = &base->phy_chans[num_phy_chans]; 3214 3215 if (base->plat_data->num_of_phy_chans == 14) { 3216 base->gen_dmac.backup = d40_backup_regs_v4b; 3217 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; 3218 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; 3219 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; 3220 base->gen_dmac.realtime_en = D40_DREG_CRSEG1; 3221 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; 3222 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; 3223 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; 3224 base->gen_dmac.il = il_v4b; 3225 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); 3226 base->gen_dmac.init_reg = dma_init_reg_v4b; 3227 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); 3228 } else { 3229 if (base->rev >= 3) { 3230 base->gen_dmac.backup = d40_backup_regs_v4a; 3231 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; 3232 } 3233 base->gen_dmac.interrupt_en = D40_DREG_PCMIS; 3234 base->gen_dmac.interrupt_clear = D40_DREG_PCICR; 3235 base->gen_dmac.realtime_en = D40_DREG_RSEG1; 3236 base->gen_dmac.realtime_clear = D40_DREG_RCEG1; 3237 base->gen_dmac.high_prio_en = D40_DREG_PSEG1; 3238 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; 3239 base->gen_dmac.il = il_v4a; 3240 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); 3241 base->gen_dmac.init_reg = dma_init_reg_v4a; 3242 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); 3243 } 3244 3245 base->phy_res = kcalloc(num_phy_chans, 3246 sizeof(*base->phy_res), 3247 GFP_KERNEL); 3248 if (!base->phy_res) 3249 goto free_base; 3250 3251 base->lookup_phy_chans = kcalloc(num_phy_chans, 3252 sizeof(*base->lookup_phy_chans), 3253 GFP_KERNEL); 3254 if (!base->lookup_phy_chans) 3255 goto free_phy_res; 3256 3257 base->lookup_log_chans = kcalloc(num_log_chans, 3258 sizeof(*base->lookup_log_chans), 3259 GFP_KERNEL); 3260 if (!base->lookup_log_chans) 3261 goto free_phy_chans; 3262 3263 base->reg_val_backup_chan = 
kmalloc_array(base->num_phy_chans,
3264 sizeof(d40_backup_regs_chan),
3265 GFP_KERNEL);
3266 if (!base->reg_val_backup_chan)
3267 goto free_log_chans;
3268
3269 base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3270 * D40_LCLA_LINK_PER_EVENT_GRP,
3271 sizeof(*base->lcla_pool.alloc_map),
3272 GFP_KERNEL);
3273 if (!base->lcla_pool.alloc_map)
3274 goto free_backup_chan;
3275
3276 base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3277 sizeof(*base->regs_interrupt),
3278 GFP_KERNEL);
3279 if (!base->regs_interrupt)
3280 goto free_map;
3281
3282 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3283 0, SLAB_HWCACHE_ALIGN,
3284 NULL);
3285 if (base->desc_slab == NULL)
3286 goto free_regs;
3287
3288
3289 return base;
3290 free_regs:
3291 kfree(base->regs_interrupt);
3292 free_map:
3293 kfree(base->lcla_pool.alloc_map);
3294 free_backup_chan:
3295 kfree(base->reg_val_backup_chan);
3296 free_log_chans:
3297 kfree(base->lookup_log_chans);
3298 free_phy_chans:
3299 kfree(base->lookup_phy_chans);
3300 free_phy_res:
3301 kfree(base->phy_res);
3302 free_base:
3303 kfree(base);
3304 unmap_io:
3305 iounmap(virtbase);
3306 release_region:
3307 release_mem_region(res->start, resource_size(res));
3308 check_prepare_enabled:
3309 if (!clk_ret)
3310 disable_unprepare:
3311 clk_disable_unprepare(clk);
3312 if (!IS_ERR(clk))
3313 clk_put(clk);
3314 return NULL;
3315 }
3316
3317 static void __init d40_hw_init(struct d40_base *base)
3318 {
3319
3320 int i;
3321 u32 prmseo[2] = {0, 0};
3322 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3323 u32 pcmis = 0;
3324 u32 pcicr = 0;
3325 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3326 u32 reg_size = base->gen_dmac.init_reg_size;
3327
3328 for (i = 0; i < reg_size; i++)
3329 writel(dma_init_reg[i].val,
3330 base->virtbase + dma_init_reg[i].reg);
3331
3332 /* Configure all our dma channels to default settings */
3333 for (i = 0; i < base->num_phy_chans; i++) {
3334
3335 activeo[i % 2] = activeo[i % 2] << 2;
3336
3337 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3338 == D40_ALLOC_PHY) {
3339 activeo[i % 2] |= 3;
3340 continue;
3341 }
3342
3343 /* Enable interrupt # */
3344 pcmis = (pcmis << 1) | 1;
3345
3346 /* Clear interrupt # */
3347 pcicr = (pcicr << 1) | 1;
3348
3349 /* Set channel to physical mode */
3350 prmseo[i % 2] = prmseo[i % 2] << 2;
3351 prmseo[i % 2] |= 1;
3352
3353 }
3354
3355 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3356 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3357 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3358 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3359
3360 /* Write which interrupt to enable */
3361 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3362
3363 /* Write which interrupt to clear */
3364 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3365
3366 /* These are __initdata and cannot be accessed after init */
3367 base->gen_dmac.init_reg = NULL;
3368 base->gen_dmac.init_reg_size = 0;
3369 }
3370
3371 static int __init d40_lcla_allocate(struct d40_base *base)
3372 {
3373 struct d40_lcla_pool *pool = &base->lcla_pool;
3374 unsigned long *page_list;
3375 int i, j;
3376 int ret;
3377
3378 /*
3379 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3380 * To fulfill this hardware requirement without wasting 256 KiB,
3381 * we allocate pages until we get an aligned one.
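* For example, with eight physical channels the pool is 8 * SZ_1K = 8192
* bytes but must sit on a 256 KiB (18-bit) boundary, so __get_free_pages()
* is retried up to MAX_LCLA_ALLOC_ATTEMPTS times; if that fails, the
* fallback below kmallocs SZ_1K * num_phy_chans + LCLA_ALIGNMENT bytes
* and aligns the result with PTR_ALIGN().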
3382 */
3383 page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3384 sizeof(*page_list),
3385 GFP_KERNEL);
3386 if (!page_list)
3387 return -ENOMEM;
3388
3389 /* Calculate how many pages are required */
3390 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3391
3392 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3393 page_list[i] = __get_free_pages(GFP_KERNEL,
3394 base->lcla_pool.pages);
3395 if (!page_list[i]) {
3396
3397 d40_err(base->dev, "Failed to allocate %d pages.\n",
3398 base->lcla_pool.pages);
3399 ret = -ENOMEM;
3400
3401 for (j = 0; j < i; j++)
3402 free_pages(page_list[j], base->lcla_pool.pages);
3403 goto free_page_list;
3404 }
3405
3406 if ((virt_to_phys((void *)page_list[i]) &
3407 (LCLA_ALIGNMENT - 1)) == 0)
3408 break;
3409 }
3410
3411 for (j = 0; j < i; j++)
3412 free_pages(page_list[j], base->lcla_pool.pages);
3413
3414 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3415 base->lcla_pool.base = (void *)page_list[i];
3416 } else {
3417 /*
3418 * After many attempts with no success in finding the correct
3419 * alignment, try allocating a big buffer instead.
3420 */
3421 dev_warn(base->dev,
3422 "[%s] Failed to get %d pages @ 18 bit align.\n",
3423 __func__, base->lcla_pool.pages);
3424 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3425 base->num_phy_chans +
3426 LCLA_ALIGNMENT,
3427 GFP_KERNEL);
3428 if (!base->lcla_pool.base_unaligned) {
3429 ret = -ENOMEM;
3430 goto free_page_list;
3431 }
3432
3433 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3434 LCLA_ALIGNMENT);
3435 }
3436
3437 pool->dma_addr = dma_map_single(base->dev, pool->base,
3438 SZ_1K * base->num_phy_chans,
3439 DMA_TO_DEVICE);
3440 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3441 pool->dma_addr = 0;
3442 ret = -ENOMEM;
3443 goto free_page_list;
3444 }
3445
3446 writel(virt_to_phys(base->lcla_pool.base),
3447 base->virtbase + D40_DREG_LCLA);
3448 ret = 0;
3449 free_page_list:
3450 kfree(page_list);
3451 return ret;
3452 }
3453
3454 static int __init d40_of_probe(struct platform_device *pdev,
3455 struct device_node *np)
3456 {
3457 struct stedma40_platform_data *pdata;
3458 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3459 const __be32 *list;
3460
3461 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3462 if (!pdata)
3463 return -ENOMEM;
3464
3465 /* If absent, this value will be obtained from h/w.
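In that case d40_hw_detect_init() derives the count from the ICFG register: num_phy_chans = 4 * (ICFG & 0x7) + 4.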
*/ 3466 of_property_read_u32(np, "dma-channels", &num_phy); 3467 if (num_phy > 0) 3468 pdata->num_of_phy_chans = num_phy; 3469 3470 list = of_get_property(np, "memcpy-channels", &num_memcpy); 3471 num_memcpy /= sizeof(*list); 3472 3473 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { 3474 d40_err(&pdev->dev, 3475 "Invalid number of memcpy channels specified (%d)\n", 3476 num_memcpy); 3477 return -EINVAL; 3478 } 3479 pdata->num_of_memcpy_chans = num_memcpy; 3480 3481 of_property_read_u32_array(np, "memcpy-channels", 3482 dma40_memcpy_channels, 3483 num_memcpy); 3484 3485 list = of_get_property(np, "disabled-channels", &num_disabled); 3486 num_disabled /= sizeof(*list); 3487 3488 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { 3489 d40_err(&pdev->dev, 3490 "Invalid number of disabled channels specified (%d)\n", 3491 num_disabled); 3492 return -EINVAL; 3493 } 3494 3495 of_property_read_u32_array(np, "disabled-channels", 3496 pdata->disabled_channels, 3497 num_disabled); 3498 pdata->disabled_channels[num_disabled] = -1; 3499 3500 pdev->dev.platform_data = pdata; 3501 3502 return 0; 3503 } 3504 3505 static int __init d40_probe(struct platform_device *pdev) 3506 { 3507 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); 3508 struct device_node *np = pdev->dev.of_node; 3509 int ret = -ENOENT; 3510 struct d40_base *base; 3511 struct resource *res; 3512 int num_reserved_chans; 3513 u32 val; 3514 3515 if (!plat_data) { 3516 if (np) { 3517 if (d40_of_probe(pdev, np)) { 3518 ret = -ENOMEM; 3519 goto report_failure; 3520 } 3521 } else { 3522 d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); 3523 goto report_failure; 3524 } 3525 } 3526 3527 base = d40_hw_detect_init(pdev); 3528 if (!base) 3529 goto report_failure; 3530 3531 num_reserved_chans = d40_phy_res_init(base); 3532 3533 platform_set_drvdata(pdev, base); 3534 3535 spin_lock_init(&base->interrupt_lock); 3536 spin_lock_init(&base->execmd_lock); 3537 3538 /* Get IO for logical channel parameter address */ 3539 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 3540 if (!res) { 3541 ret = -ENOENT; 3542 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); 3543 goto destroy_cache; 3544 } 3545 base->lcpa_size = resource_size(res); 3546 base->phy_lcpa = res->start; 3547 3548 if (request_mem_region(res->start, resource_size(res), 3549 D40_NAME " I/O lcpa") == NULL) { 3550 ret = -EBUSY; 3551 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); 3552 goto destroy_cache; 3553 } 3554 3555 /* We make use of ESRAM memory for this. 
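If the boot code has already programmed D40_DREG_LCPA with a different non-zero address, the driver keeps the hardware value and only warns; otherwise it writes the start of the "lcpa" resource.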
*/ 3556 val = readl(base->virtbase + D40_DREG_LCPA); 3557 if (res->start != val && val != 0) { 3558 dev_warn(&pdev->dev, 3559 "[%s] Mismatch LCPA dma 0x%x, def %pa\n", 3560 __func__, val, &res->start); 3561 } else 3562 writel(res->start, base->virtbase + D40_DREG_LCPA); 3563 3564 base->lcpa_base = ioremap(res->start, resource_size(res)); 3565 if (!base->lcpa_base) { 3566 ret = -ENOMEM; 3567 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); 3568 goto destroy_cache; 3569 } 3570 /* If lcla has to be located in ESRAM we don't need to allocate */ 3571 if (base->plat_data->use_esram_lcla) { 3572 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 3573 "lcla_esram"); 3574 if (!res) { 3575 ret = -ENOENT; 3576 d40_err(&pdev->dev, 3577 "No \"lcla_esram\" memory resource\n"); 3578 goto destroy_cache; 3579 } 3580 base->lcla_pool.base = ioremap(res->start, 3581 resource_size(res)); 3582 if (!base->lcla_pool.base) { 3583 ret = -ENOMEM; 3584 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); 3585 goto destroy_cache; 3586 } 3587 writel(res->start, base->virtbase + D40_DREG_LCLA); 3588 3589 } else { 3590 ret = d40_lcla_allocate(base); 3591 if (ret) { 3592 d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); 3593 goto destroy_cache; 3594 } 3595 } 3596 3597 spin_lock_init(&base->lcla_pool.lock); 3598 3599 base->irq = platform_get_irq(pdev, 0); 3600 3601 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 3602 if (ret) { 3603 d40_err(&pdev->dev, "No IRQ defined\n"); 3604 goto destroy_cache; 3605 } 3606 3607 if (base->plat_data->use_esram_lcla) { 3608 3609 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); 3610 if (IS_ERR(base->lcpa_regulator)) { 3611 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); 3612 ret = PTR_ERR(base->lcpa_regulator); 3613 base->lcpa_regulator = NULL; 3614 goto destroy_cache; 3615 } 3616 3617 ret = regulator_enable(base->lcpa_regulator); 3618 if (ret) { 3619 d40_err(&pdev->dev, 3620 "Failed to enable lcpa_regulator\n"); 3621 regulator_put(base->lcpa_regulator); 3622 base->lcpa_regulator = NULL; 3623 goto destroy_cache; 3624 } 3625 } 3626 3627 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); 3628 3629 pm_runtime_irq_safe(base->dev); 3630 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); 3631 pm_runtime_use_autosuspend(base->dev); 3632 pm_runtime_mark_last_busy(base->dev); 3633 pm_runtime_set_active(base->dev); 3634 pm_runtime_enable(base->dev); 3635 3636 ret = d40_dmaengine_init(base, num_reserved_chans); 3637 if (ret) 3638 goto destroy_cache; 3639 3640 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); 3641 if (ret) { 3642 d40_err(&pdev->dev, "Failed to set dma max seg size\n"); 3643 goto destroy_cache; 3644 } 3645 3646 d40_hw_init(base); 3647 3648 if (np) { 3649 ret = of_dma_controller_register(np, d40_xlate, NULL); 3650 if (ret) 3651 dev_err(&pdev->dev, 3652 "could not register of_dma_controller\n"); 3653 } 3654 3655 dev_info(base->dev, "initialized\n"); 3656 return 0; 3657 destroy_cache: 3658 kmem_cache_destroy(base->desc_slab); 3659 if (base->virtbase) 3660 iounmap(base->virtbase); 3661 3662 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { 3663 iounmap(base->lcla_pool.base); 3664 base->lcla_pool.base = NULL; 3665 } 3666 3667 if (base->lcla_pool.dma_addr) 3668 dma_unmap_single(base->dev, base->lcla_pool.dma_addr, 3669 SZ_1K * base->num_phy_chans, 3670 DMA_TO_DEVICE); 3671 3672 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 3673 free_pages((unsigned 
long)base->lcla_pool.base, 3674 base->lcla_pool.pages); 3675 3676 kfree(base->lcla_pool.base_unaligned); 3677 3678 if (base->phy_lcpa) 3679 release_mem_region(base->phy_lcpa, 3680 base->lcpa_size); 3681 if (base->phy_start) 3682 release_mem_region(base->phy_start, 3683 base->phy_size); 3684 if (base->clk) { 3685 clk_disable_unprepare(base->clk); 3686 clk_put(base->clk); 3687 } 3688 3689 if (base->lcpa_regulator) { 3690 regulator_disable(base->lcpa_regulator); 3691 regulator_put(base->lcpa_regulator); 3692 } 3693 3694 kfree(base->lcla_pool.alloc_map); 3695 kfree(base->lookup_log_chans); 3696 kfree(base->lookup_phy_chans); 3697 kfree(base->phy_res); 3698 kfree(base); 3699 report_failure: 3700 d40_err(&pdev->dev, "probe failed\n"); 3701 return ret; 3702 } 3703 3704 static const struct of_device_id d40_match[] = { 3705 { .compatible = "stericsson,dma40", }, 3706 {} 3707 }; 3708 3709 static struct platform_driver d40_driver = { 3710 .driver = { 3711 .name = D40_NAME, 3712 .pm = &dma40_pm_ops, 3713 .of_match_table = d40_match, 3714 }, 3715 }; 3716 3717 static int __init stedma40_init(void) 3718 { 3719 return platform_driver_probe(&d40_driver, d40_probe); 3720 } 3721 subsys_initcall(stedma40_init); 3722