/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *		   bit 23:16 read burst
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *	    bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001
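/*
 * Informational note (derived from the @stride bit layout documented in
 * struct msgdma_extended_desc above): MSGDMA_DESC_STRIDE_RW == 0x00010001
 * encodes a read stride of 1 in bits 15:0 and a write stride of 1 in
 * bits 31:16, i.e. both source and destination addresses advance
 * sequentially, as needed for plain memcpy transfers.
 */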
/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP		BIT(0)
#define MSGDMA_CSR_CTL_RESET		BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR	BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY	BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR	BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS	BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)	(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move the descriptor between the free, pending,
 *	  active and done lists
 * @tx_list: list of chained child descriptors of this transaction
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/**
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)
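/*
 * Descriptor lifecycle, as implemented by the functions below: a sw
 * descriptor moves from free_list (msgdma_get_descriptor) to
 * pending_list (msgdma_tx_submit), then to active_list
 * (msgdma_start_transfer), to done_list (msgdma_complete_descriptor)
 * and finally back to free_list (msgdma_free_descriptor).
 */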
/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	spin_lock_bh(&mdev->lock);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_bh(&mdev->lock);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 *
 * Returns the descriptor and all chained child descriptors to the free list.
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List to parse and delete the descriptor
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;

	new = tx_to_desc(tx);
	spin_lock_bh(&mdev->lock);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_bh(&mdev->lock);

	return cookie;
}
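/*
 * Example client usage of the memcpy path below (informational sketch
 * only, not part of this driver; how the channel is requested is
 * client specific):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */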
/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_bh(&mdev->lock);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_bh(&mdev->lock);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_bh(&mdev->lock);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_bh(&mdev->lock);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_bh(&mdev->lock);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_bh(&mdev->lock);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
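/*
 * Example slave configuration (informational sketch; fifo_phys_addr is
 * a hypothetical device FIFO address): before preparing a slave_sg
 * transfer, a client provides the device-side address via
 * msgdma_dma_config() below, e.g.:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */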
static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	/* Poll every 1us for up to 10ms until the reset bit clears */
	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check if the DESC FIFO is not full. If it's full, we need to wait
	 * for at least one entry to become free again
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO, once the last word (control word) is written. Since we
	 * are not 100% sure that memcpy() writes all words in the
	 * "correct" order (address from low to high) on all architectures,
	 * we make sure this control word is written last by single coding
	 * it and adding some write-barriers here.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);

	spin_lock_bh(&mdev->lock);
	msgdma_start_transfer(mdev);
	spin_unlock_bh(&mdev->lock);
}
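/*
 * Note on locking: msgdma_chan_desc_cleanup() below is entered with
 * mdev->lock held (from msgdma_tasklet()) and drops the lock around the
 * client completion callback, so the callback may safely call back into
 * the dmaengine API, e.g. to submit further transactions.
 */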
/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock(&mdev->lock);
			callback(callback_param);
			spin_lock(&mdev->lock);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	spin_lock_bh(&mdev->lock);
	msgdma_free_descriptors(mdev);
	spin_unlock_bh(&mdev->lock);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}
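/*
 * Completion path: msgdma_irq_handler() below restarts the dispatcher
 * if it went idle and schedules irq_tasklet; msgdma_tasklet() then pops
 * one response per completed descriptor from the response FIFO and
 * invokes the cleanup helpers above.
 */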
/**
 * msgdma_tasklet - Completion tasklet handler
 * @data: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_tasklet(unsigned long data)
{
	struct msgdma_device *mdev = (struct msgdma_device *)data;
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;

	spin_lock(&mdev->lock);

	/* Read number of responses that are available */
	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
	dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
		__func__, __LINE__, count);

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * have any real values, like transferred bytes or error
		 * bits. So we need to just drop these values.
		 */
		size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
		status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock(&mdev->lock);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res),
					 dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap_nocache(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap_nocache of %s failed!\n", name);
		return -ENOMEM;
	}

	return 0;
}
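/*
 * Probe expects three named MEM resources, typically provided via the
 * device tree "reg-names" property: "csr" (dispatcher control/status
 * registers), "desc" (descriptor slave port) and "resp" (response
 * slave port).
 */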
/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
		BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Try to set the DMA mask to 64 bits, fall back to 32 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

	return 0;
}

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");