/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	void *sw_token;
	u32 sw_buffer;
	u32 sw_len;
};

struct cpdma_desc_pool {
	u32 phys;
	u32 hw_addr;
	void __iomem *iomap;		/* ioremap map */
	void *cpumap;			/* dma_alloc map */
	int desc_size, mem_size;
	int num_desc, used_desc;
	unsigned long *bitmap;
	struct device *dev;
	spinlock_t lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state state;
	struct cpdma_params params;
	struct device *dev;
	struct cpdma_desc_pool *pool;
	spinlock_t lock;
	struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem *head, *tail;
	void __iomem *hdp, *cp, *rxfree;
	enum cpdma_state state;
	struct cpdma_ctlr *ctlr;
	int chan_num;
	spinlock_t lock;
	int count;
	u32 mask;
	cpdma_handler_fn handler;
	enum dma_data_direction dir;
	struct cpdma_chan_stats stats;
	/* offsets into dmaregs */
	int int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

/*
 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors. Some other
 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev = dev;
	pool->mem_size = size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc = size / pool->desc_size;

	bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
	       (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

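/*
 * Allocate a run of 'num_desc' contiguous descriptors from the pool.  The
 * pool is split in half: RX channels allocate from the lower half and TX
 * channels from the upper half, so one direction cannot starve the other
 * of descriptors.
 */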
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
					   desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

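/*
 * Create a channel for the combined RX/TX channel number 'chan_num'.  The
 * per-channel head descriptor pointer, completion pointer and (for RX)
 * free-buffer count registers are taken from the RX or TX register block
 * according to the channel direction, together with the matching interrupt
 * mask and teardown registers.
 */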
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr = ctlr;
	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;
	} else {
		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

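/*
 * Queue one descriptor on a channel; the caller holds chan->lock.  On an
 * idle queue the descriptor becomes the new head and, if the channel is
 * active, is written to the head descriptor pointer.  Otherwise it is
 * chained after the current tail; if the hardware had already hit
 * end-of-queue on the old tail (EOQ set, OWNER clear), the queue is
 * restarted from the new descriptor and counted as a misqueue.
 */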
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_token, token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
					   pool->num_desc, pool->num_desc/2,
					   1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

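/*
 * Reap one completed descriptor from the head of the channel's queue.
 * Returns -ENOENT when the queue is empty and -EBUSY when the head
 * descriptor is still owned by the hardware; otherwise returns the
 * descriptor's completion status.  On end-of-queue the channel is
 * restarted from the new head, and the completion handler is invoked
 * with the channel lock dropped.
 */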
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

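/*
 * Layout of the fields in the extended control/status registers
 * (DMACONTROL, DMASTATUS, RXBUFFOFS) accessed by cpdma_control_get() and
 * cpdma_control_set(): register, bit position, field mask and allowed
 * access for each CPDMA_* control.
 */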
struct cpdma_control_info {
	u32 reg;
	u32 shift, mask;
	int access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");