// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24
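/*
 * Illustrative CSR composition (a sketch based on the definitions above,
 * not a value taken verbatim from the code below): a one-shot,
 * flow-controlled mem-to-dev transfer on requester 8 with an interrupt
 * on completion would program the CSR roughly as
 *
 *	csr = TEGRA_APBDMA_CSR_DIR | TEGRA_APBDMA_CSR_ONCE |
 *	      TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC |
 *	      (8 << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT);
 *
 * On chips without a separate WCOUNT register, the word count is merged
 * into CSR bits [15:2] as well (see tegra_dma_prep_wcount()).
 */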
/*
 * If a burst is in flight when the DMA is paused, then this is the time
 * needed for the in-flight burst to complete and for the DMA status
 * register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

struct tegra_dma;

/*
 * tegra_dma_chip_data Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by the controller.
 * @support_channel_pause: Support per-channel pause of the DMA.
 * @support_separate_wcount_reg: Support a separate word count register.
 */
struct tegra_dma_chip_data {
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 ahb_seq;
	u32 apb_seq;
	u32 wcount;
};

/*
 * tegra_dma_sg_req: DMA request details to configure the hardware. This
 * structure holds the details of one transfer used to configure the DMA
 * hardware. The client's request for a data transfer may be broken into
 * multiple sub-transfers, according to the requester's details and the
 * hardware's capabilities. Each sub-transfer is added to the list of
 * transfers and points to the Tegra DMA descriptor that manages the
 * overall transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	unsigned int			req_len;
	bool				configured;
	bool				last_sg;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
	unsigned int			words_xferred;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages the client's requests.
 * This descriptor keeps track of the transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	unsigned int			bytes_requested;
	unsigned int			bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	unsigned int			cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[12];
	bool			config_init;
	unsigned int		id;
	void __iomem		*chan_addr;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for the bottom half of interrupt handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;

	/* Channel-slave specific configuration */
	unsigned int		slave_id;
	struct dma_slave_config	dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;

	struct wait_queue_head	wq;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	struct reset_control		*rst;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32				global_pause_count;

	/* Last member of the structure */
	struct tegra_dma_channel	channels[];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *
txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
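/*
 * Note on the register accessors above: tdma_read()/tdma_write() take
 * offsets relative to the controller's global register block, while
 * tdc_read()/tdc_write() take offsets relative to a channel's own block.
 * The probe routine points tdc->chan_addr at
 * base_addr + TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + id * channel_reg_size,
 * so the TEGRA_APBDMA_CHAN_* offsets apply uniformly to every channel.
 */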
/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse descriptors that are still waiting for an ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *
tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
					  node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;

	return 0;
}

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}
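/*
 * A note on the pause implementation above: Tegra20/30 can only pause
 * the whole controller, so pausing one channel necessarily stalls every
 * other channel as well. The global_pause_count refcount lets several
 * channels nest pause/resume requests: the GENERAL enable bit is cleared
 * on the first pause and restored only when the last pauser resumes.
 */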
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
					 struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no pending EOC status then this check makes sure that
	 * the last burst has not yet completed. The last burst may be in
	 * flight and complete while the DMA is paused, in which case it
	 * neither generates an interrupt nor reloads the new configuration.
	 * If the EOC status is already set then the interrupt handler needs
	 * to load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing; the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;
	nsg_req->words_xferred = 0;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq, *hnsgreq;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
					   node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline unsigned int
get_current_xferred_count(struct tegra_dma_channel *tdc,
			  struct tegra_dma_sg_req *sg_req,
			  unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
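/*
 * Worked example for get_current_xferred_count() (an arithmetic sketch):
 * the STATUS count field holds the not-yet-transferred byte count minus
 * one word, since the hardware counter runs 4 bytes behind the actual
 * transfer. For a 64-byte request with the count field reading 28,
 * 64 - 28 - 4 = 32 bytes have been transferred so far.
 */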
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
					   bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq;

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * looping of transfers cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		pm_runtime_put(tdc->tdma->dev);
		dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);

	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate)
		return;

	if (list_empty(&tdc->pending_sg_req)) {
		pm_runtime_put(tdc->tdma->dev);
		return;
	}

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* If we DMA for long enough, the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* A callback needs to be invoked */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If this is not the last request, move it to the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned int cb_count;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
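/*
 * Note on the tasklet above: the channel lock is dropped around
 * dmaengine_desc_callback_invoke() so that client callbacks may safely
 * call back into the dmaengine API (e.g. to prepare and submit the next
 * transfer) without deadlocking on tdc->lock.
 */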
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	u32 status;

	spin_lock(&tdc->lock);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		wake_up_all(&tdc->wq);
		spin_unlock(&tdc->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&tdc->lock);
	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
		 status);

	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		err = pm_runtime_get_sync(tdc->tdma->dev);
		if (err < 0) {
			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
			goto end;
		}

		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	unsigned long flags;
	u32 status, wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

	pm_runtime_put(tdc->tdma->dev);
	wake_up_all(&tdc->wq);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}
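/*
 * Note on tegra_dma_terminate_all(): the channel is paused and the word
 * count sampled *before* tegra_dma_stop() so that bytes_transferred of
 * the head request stays accurate; a later tegra_dma_tx_status() call on
 * the terminated descriptor can then still report a meaningful residue.
 */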
static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&tdc->lock, flags);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
}

static void tegra_dma_synchronize(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	/*
	 * The CPU that handles the interrupt could be busy in an
	 * uninterruptible state; in that case a sibling CPU
	 * should wait until the interrupt has been handled.
	 */
	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));

	tasklet_kill(&tdc->tasklet);
}
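/*
 * Client-side sketch (standard dmaengine API, not specific to this
 * driver): a client that wants to tear down a channel typically does
 *
 *	dmaengine_terminate_async(chan);
 *	dmaengine_synchronize(chan);	// ends up in tegra_dma_synchronize()
 *
 * after which no completion callback is running or will run.
 */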
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
					       struct tegra_dma_sg_req *sg_req)
{
	u32 status, wcount = 0;

	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
		return 0;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = status;

	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
		return sg_req->req_len;

	wcount = get_current_xferred_count(tdc, sg_req, wcount);

	if (!wcount) {
		/*
		 * If wcount wasn't ever polled for this SG before, then
		 * simply assume that the transfer hasn't started yet.
		 *
		 * Otherwise it's the end of the transfer.
		 *
		 * The alternative would be to poll the status register
		 * until the EOC bit is set or wcount goes up. That is
		 * because the EOC bit is only set after the last burst
		 * completes and the counter is less than the actual
		 * transfer size by 4 bytes. The counter value wraps around
		 * in cyclic mode before EOC is set(!), so we can't easily
		 * distinguish the start of a transfer from its end.
		 */
		if (sg_req->words_xferred)
			wcount = sg_req->req_len - 4;

	} else if (wcount < sg_req->words_xferred) {
		/*
		 * This case will never happen for a non-cyclic transfer.
		 *
		 * For a cyclic transfer, although it is possible for the
		 * next transfer to have already started (resetting the word
		 * count), this case should still not happen because we should
		 * have detected that the EOC bit is set and hence that the
		 * transfer was completed.
		 */
		WARN_ON_ONCE(1);

		wcount = sg_req->req_len - 4;
	} else {
		sg_req->words_xferred = wcount;
	}

	return wcount;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;
	unsigned int bytes = 0;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   ((dma_desc->bytes_transferred + bytes) %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return ret;
}
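/*
 * Client-side sketch for querying progress (standard dmaengine API; the
 * modulo in the residue computation above makes this work for cyclic
 * transfers whose byte count wraps):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	// state.residue = bytes still outstanding, at burst granularity
 */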
static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
					 enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
					  u32 burst_size,
					  enum dma_slave_buswidth slave_bw,
					  u32 len)
{
	unsigned int burst_byte, burst_ahb_width;

	/*
	 * burst_size from the client is in units of the bus width;
	 * convert it into AHB memory-width units, which are 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
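/*
 * Worked example for get_burst_size() (an arithmetic sketch): a client
 * maxburst of 8 at a 4-byte bus width gives burst_byte = 32 and
 * burst_ahb_width = 8 AHB words, selecting BURST_8. With maxburst = 0
 * the length decides instead: len = 48 (word-aligned, bit 4 set)
 * selects BURST_4, while len = 64 selects BURST_8.
 */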
static int get_transfer_param(struct tegra_dma_channel *tdc,
			      enum dma_transfer_direction direction,
			      u32 *apb_addr,
			      u32 *apb_seq,
			      u32 *csr,
			      unsigned int *burst_size,
			      enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		break;
	}

	return -EINVAL;
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
				  struct tegra_dma_channel_regs *ch_regs,
				  u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc,
			struct scatterlist *sgl,
			unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags,
			void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	struct list_head req_list;
	struct scatterlist *sg;
	unsigned int burst_size;
	unsigned int i;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    len > tdc->tdma->chip_data->max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
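/*
 * Client-side sketch for a one-shot slave transfer (standard dmaengine
 * API; names are illustrative):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = xfer_done;	// hypothetical completion handler
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Note that DMA_PREP_INTERRUPT is mandatory here, since the function
 * above rejects descriptors prepared without it.
 */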
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
			  size_t buf_len,
			  size_t period_len,
			  enum dma_transfer_direction direction,
			  unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	dma_addr_t mem = buf_addr;
	unsigned int burst_size;
	size_t len, remain_len;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests may be queued as long as the DMA has not been
	 * started; the driver will then loop over all of them. Once the
	 * DMA has started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    len > tdc->tdma->chip_data->max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-sized requests */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
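/*
 * Client-side sketch for a cyclic transfer (standard dmaengine API,
 * e.g. an audio ring buffer; names are illustrative):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_DEV_TO_MEM,
 *					 DMA_PREP_INTERRUPT);
 *	desc->callback = period_elapsed;	// called once per period
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The function above splits the buffer into buf_len / period_len
 * requests and cycles over them until the channel is terminated.
 */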
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dma_cookie_init(&tdc->dma_chan);

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	tasklet_kill(&tdc->tasklet);

	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
					    node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct tegra_dma_channel *tdc;
	struct dma_chan *chan;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};
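/*
 * Device tree note (illustrative, matching the xlate above): clients
 * reference a channel with a single cell holding the APB DMA requester
 * (slave) ID, e.g.
 *
 *	dmas = <&apbdma 8>;
 *	dma-names = "rx";
 *
 * Also note the Tegra148 channel stride of 0x40: the separate WCOUNT
 * (0x20) and WORD_TRANSFER (0x24) registers would not fit within the
 * 0x20 stride used by the earlier chips.
 */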
static int tegra_dma_init_hw(struct tegra_dma *tdma)
{
	int err;

	err = reset_control_assert(tdma->rst);
	if (err) {
		dev_err(tdma->dev, "failed to assert reset: %d\n", err);
		return err;
	}

	err = clk_enable(tdma->dma_clk);
	if (err) {
		dev_err(tdma->dev, "failed to enable clk: %d\n", err);
		return err;
	}

	/* Hold the controller in reset for 2 us, then release it */
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);

	clk_disable(tdma->dma_clk);

	return 0;
}
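/*
 * Note on tegra_dma_init_hw(): this runs at probe time and again on
 * system resume. The IRQ_MASK_SET write enables the interrupts of all
 * channels at the controller level; delivery for each channel is still
 * gated by the IE_EOC bit in that channel's CSR.
 */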
static int tegra_dma_probe(struct platform_device *pdev)
{
	const struct tegra_dma_chip_data *cdata;
	struct tegra_dma *tdma;
	unsigned int i;
	size_t size;
	int ret;

	cdata = of_device_get_match_data(&pdev->dev);
	size = struct_size(tdma, channels, cdata->nr_channels);

	tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	ret = clk_prepare(tdma->dma_clk);
	if (ret)
		return ret;

	ret = tegra_dma_init_hw(tdma);
	if (ret)
		goto err_clk_unprepare;

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		int irq;

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_pm_disable;
		}

		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
				       tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_pm_disable;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
			     (unsigned long)tdc);
		spin_lock_init(&tdc->lock);
		init_waitqueue_head(&tdc->wq);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
		 cdata->nr_channels);

	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);

err_pm_disable:
	pm_runtime_disable(&pdev->dev);

err_clk_unprepare:
	clk_unprepare(tdma->dma_clk);

	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
	pm_runtime_disable(&pdev->dev);
	clk_unprepare(tdma->dma_clk);

	return 0;
}

static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	clk_disable(tdma->dma_clk);

	return 0;
}

static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	return clk_enable(tdma->dma_clk);
}

static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;
	bool busy;

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tasklet_kill(&tdc->tasklet);

		spin_lock_irqsave(&tdc->lock, flags);
		busy = tdc->busy;
		spin_unlock_irqrestore(&tdc->lock, flags);

		if (busy) {
			dev_err(tdma->dev, "channel %u busy\n", i);
			return -EBUSY;
		}
	}

	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int err;

	err = tegra_dma_init_hw(tdma);
	if (err)
		return err;

	return pm_runtime_force_resume(dev);
}
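/*
 * Power management design note: the clock is prepared once at probe and
 * afterwards only enabled/disabled by the runtime PM callbacks, which
 * pair with the pm_runtime_get_sync() in issue_pending and the
 * pm_runtime_put() on completion/termination. pm_runtime_irq_safe() in
 * probe allows those calls from atomic context, which is why the runtime
 * callbacks use clk_enable()/clk_disable() rather than the sleeping
 * clk_prepare_enable() variants.
 */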
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");