// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)

#define AXI_DMA_FLAG_HAS_APB_REGS	BIT(0)
#define AXI_DMA_FLAG_HAS_RESETS		BIT(1)
#define AXI_DMA_FLAG_USE_CFG2		BIT(2)

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64-bit write into two 32-bit writes, as some HW
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}
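/*
 * Note: the low-then-high pair above matches what lo_hi_writeq() from
 * <linux/io-64-nonatomic-lo-hi.h> (already included) would emit, e.g.:
 *
 *	lo_hi_writeq(val, chan->chan_regs + reg);
 *
 * Open-coding it just keeps the 32-bit split and its ordering explicit.
 */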
static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels &&
	    !chan->chip->dw->hdata->use_cfg2) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}

static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}
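/*
 * DMAC_CHEN pairs each channel-enable bit with a write-enable (WE) bit:
 * the hardware only updates enable bits whose corresponding WE bit is set
 * in the same 32-bit write. That is why the helpers above and below can
 * toggle a single channel without racing updates to its siblings. A
 * minimal sketch, assuming the documented WE semantics: disabling channel
 * 2 on the 8-channel register map could be expressed as
 *
 *	axi_dma_iowrite32(chip, DMAC_CHEN,
 *			  BIT(2) << DMAC_CHAN_EN_WE_SHIFT);
 */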
static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	if (chan->chip->dw->hdata->reg_map_8_channels)
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	else
		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}

static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
		 atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}
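/*
 * Illustrative client-side sketch (not part of this driver): the residue
 * reported above is consumed through the generic dmaengine helpers, e.g.:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		dev_dbg(dev, "in flight, residue: %u bytes\n", state.residue);
 */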
static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}
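/*
 * Start-of-transfer sequence used below: globally enable the DMAC, program
 * the channel CFG registers (multi-block type, transfer type/flow control,
 * handshake selection, priority), point CH_LLP at the first LLI, unmask
 * the transfer-done and error interrupts, then set the channel-enable bit.
 */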
/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptors still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}
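/*
 * On chips with APB companion registers (e.g. "intel,kmb-axi-dma"), the
 * DMAC_APB_HW_HS_SEL_* registers appear to form a per-channel mux routing
 * one of the hardware handshake interfaces to a channel. Each channel owns
 * a DMA_APB_HS_SEL_BIT_SIZE-bit field; the helper below claims or releases
 * that field for the channel's handshake number.
 */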
static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to it.
	 * Unlock the DMA channel by assigning 0x3F to it.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * Set the handshake field for this channel ID; a single 64-bit
	 * write covers the fields of all 8 channels.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI set to 1, it treats the current block as the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}
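/*
 * Layout of one hardware link-list item (LLI), per the struct axi_dma_lli
 * fields programmed in this file:
 *
 *	sar         - source address
 *	dar         - destination address
 *	block_ts_lo - block transfer size, in items of CTL SRC/DST width,
 *	              minus one
 *	llp         - pointer to the next LLI (the low bit carries the LMS
 *	              master-select used for LLI fetching)
 *	ctl_lo/hi   - widths, address-increment mode, burst lengths, and
 *	              the LLI_VALID/LLI_LAST flags
 */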
static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}
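/*
 * Worked example (hypothetical numbers) for calculate_block_len(): with a
 * per-channel block_size of 1024 transfers and a 32-bit memory width
 * (mem_width == 2), one hardware block can move up to 1024 << 2 == 4096
 * bytes, so an 8 KiB cyclic period would be split into two segments below.
 */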
static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/*
		 * Set end-of-link on each linked descriptor, so that the
		 * cyclic callback function can be triggered from the
		 * interrupt handler.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Chain the LLIs into a ring: the last one points back to the first */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs = 0;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);

	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Link the LLIs into a transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}
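/*
 * Illustrative client-side flow (hypothetical consumer, error handling
 * elided) that ends up in the slave_sg prep routine above; fifo_phys is an
 * assumed device FIFO address:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */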
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment.
		 * Actually source and destination widths can be different,
		 * but make them the same to keep things simple.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of
		 * xfer_width to be transferred in one DMA block transfer.
		 * The BLOCK_TS register should be set to block_ts - 1.
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Link the LLIs into a transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}
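/*
 * Error path: on any bit in DWAXIDMAC_IRQ_ALL_ERR the handler below stops
 * the channel, dumps the offending descriptor chain via the helpers above,
 * completes the bad cookie, and tries to restart with the next queued
 * descriptor.
 */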
static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);

		/* Submit queued descriptors after processing the completed ones */
		axi_chan_start_first_queued(chan);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
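/*
 * Top-half interrupt handler: one IRQ line is shared by all channels, so
 * the handler masks the controller, walks every channel's CH_INTSTATUS,
 * and dispatches to the error or transfer-complete path before unmasking.
 */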
static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			 axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}
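/*
 * Note: clients normally reach device_terminate_all above through
 * dmaengine_terminate_sync(chan) (or the terminate_async/synchronize
 * pair), which also waits for in-flight completion callbacks to finish.
 */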
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u32 val;

	if (chan->chip->dw->hdata->reg_map_8_channels) {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
		val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
		val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
	}

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}

static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];
	return dchan;
}
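/*
 * Illustrative device-tree fragment (hypothetical values) matching the
 * properties parsed below; see the snps,dw-axi-dmac binding for details:
 *
 *	dmac: dma-controller@80000000 {
 *		compatible = "snps,axi-dma-1.01a";
 *		dma-channels = <4>;
 *		snps,dma-masters = <2>;
 *		snps,data-width = <3>;
 *		snps,block-size = <1024 1024 1024 1024>;
 *		snps,priority = <0 1 2 3>;
 *		snps,axi-max-burst-len = <16>;
 *	};
 */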
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}

static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	struct reset_control *resets;
	unsigned int flags;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
	}

	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}
	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
	 * number of supported blocks is 1024. The device register width is
	 * 4 bytes, so constrain the maximum segment size to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);
	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work also without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}

static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable clk before accessing registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	devm_free_irq(chip->dev, chip->irq, chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}

static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{
		.compatible = "snps,axi-dma-1.01a"
	}, {
		.compatible = "intel,kmb-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
	}, {
		.compatible = "starfive,jh7110-axi-dma",
		.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
	},
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");