/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};
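/*
 * Channel numbering: the first CPDMA_MAX_CHANNELS slots of ctlr->channels[]
 * hold tx channels and the upper half holds rx channels.  The helpers used
 * throughout this file (is_rx_chan(), __chan_linear(), chan_linear()) come
 * from davinci_cpdma.h; a rough sketch of the assumed mapping, for reference
 * only:
 *
 *	is_rx_chan(chan)	chan->chan_num >= CPDMA_MAX_CHANNELS
 *	__chan_linear(num)	num % CPDMA_MAX_CHANNELS
 *	chan_linear(chan)	__chan_linear(chan->chan_num)
 */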
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN_ON(pool->used_desc);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}
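/*
 * Worked example of the translation above (addresses are illustrative only):
 * with hw_addr = 0x01e20000 and a descriptor carved out at iomap + 0x40,
 * desc_phys() returns 0x01e20040, and desc_from_phys() below maps that DMA
 * address back to iomap + 0x40.
 */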
static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	struct cpdma_desc __iomem *desc = NULL;

	desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
							   pool->desc_size);
	if (desc)
		pool->used_desc++;

	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
	pool->used_desc--;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/*
	 * Don't take ctlr->lock here: cpdma_ctlr_stop() and
	 * cpdma_chan_destroy() acquire it themselves, so holding it across
	 * these calls would deadlock.
	 */
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
{
	return ctlr->pool->num_desc / 2;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
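/*
 * Typical bring-up from a client driver such as the EMAC or CPSW glue.  This
 * is a hedged sketch, not code lifted from an in-tree user: the handler and
 * function names (example_tx_handler, example_dma_init) are illustrative,
 * and tx_chan_num() is assumed to come from davinci_cpdma.h.
 */
#if 0	/* illustrative only, not compiled */
static void example_tx_handler(void *token, int len, int status)
{
	/* release the buffer/skb carried in 'token' */
}

static int example_dma_init(struct cpdma_params *params)
{
	struct cpdma_ctlr *dma;
	struct cpdma_chan *txch;

	dma = cpdma_ctlr_create(params);
	if (!dma)
		return -ENOMEM;

	txch = cpdma_chan_create(dma, tx_chan_num(0), example_tx_handler);
	if (IS_ERR(txch))
		return PTR_ERR(txch);

	/* start the engine first, then unmask the interrupts */
	cpdma_ctlr_start(dma);
	cpdma_ctlr_int_ctrl(dma, true);
	return 0;
}
#endif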
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
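/*
 * Submission sketch: rx buffers are typically pre-posted in a loop at open
 * time, tx buffers queued per packet.  A hedged example only; rxch, txch,
 * skb and ndev are the caller's objects and a 'directed' value of 0 means
 * no directed-port override:
 */
#if 0	/* illustrative only, not compiled */
	/* post one rx buffer; the token comes back in the rx handler */
	ret = cpdma_chan_submit(rxch, skb, skb->data, skb_tailroom(skb), 0);

	/* queue one tx packet, stopping the queue when descriptors run dry */
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
	if (!ret && !cpdma_check_free_tx_desc(txch))
		netif_stop_queue(ndev);
#endif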
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
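/*
 * Completion handling sketch: a NAPI poll loop drains completed descriptors
 * with cpdma_chan_process() and then re-enables the channel interrupt and
 * signals EOI.  Hedged example only; rxch, ctlr, napi and budget belong to
 * the caller, and CPDMA_EOI_RX is assumed to be provided by davinci_cpdma.h:
 */
#if 0	/* illustrative only, not compiled */
	num_rx = cpdma_chan_process(rxch, budget);
	if (num_rx < budget) {
		napi_complete(napi);
		cpdma_chan_int_ctrl(rxch, true);
		cpdma_ctlr_eoi(ctlr, CPDMA_EOI_RX);
	}
#endif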
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
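/*
 * Control field access sketch: on controllers with the extended register set
 * (has_ext_regs), the table above drives simple read-modify-write accessors.
 * A hedged example of programming the receive buffer offset; the value 2 is
 * illustrative (e.g. for IP header alignment), not a requirement:
 */
#if 0	/* illustrative only, not compiled */
	if (cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2) < 0)
		dev_warn(dev, "failed to set rx buffer offset\n");
#endif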
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");