/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	void *sw_token;
	u32 sw_buffer;
	u32 sw_len;
};

struct cpdma_desc_pool {
	u32 phys;
	u32 hw_addr;
	void __iomem *iomap;	/* ioremap map */
	void *cpumap;		/* dma_alloc map */
	int desc_size, mem_size;
	int num_desc, used_desc;
	unsigned long *bitmap;
	struct device *dev;
	spinlock_t lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state state;
	struct cpdma_params params;
	struct device *dev;
	struct cpdma_desc_pool *pool;
	spinlock_t lock;
	struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
};
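
/*
 * Per-channel state.  hdp, cp and rxfree point at this channel's slots in
 * the controller's head-descriptor-pointer, completion-pointer and rx
 * free-buffer-count register arrays; int_set, int_clear and td hold the
 * offsets of the interrupt mask and teardown registers for this direction.
 */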

struct cpdma_chan {
	struct cpdma_desc __iomem *head, *tail;
	void __iomem *hdp, *cp, *rxfree;
	enum cpdma_state state;
	struct cpdma_ctlr *ctlr;
	int chan_num;
	spinlock_t lock;
	int count;
	u32 mask;
	cpdma_handler_fn handler;
	enum dma_data_direction dir;
	struct cpdma_chan_stats stats;
	/* offsets into dmaregs */
	int int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev = dev;
	pool->mem_size = size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc = size / pool->desc_size;

	bitmap_size = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
		(__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
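
/*
 * Allocate num_desc contiguous descriptors from the pool bitmap.  RX
 * channels allocate from the first half of the pool and TX channels from
 * the second half, so one direction cannot starve the other of descriptors.
 */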

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
				desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
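
/*
 * Bring the DMA engine up: soft-reset it if supported, zero every head
 * descriptor and completion pointer, enable tx/rx DMA, and restart any
 * channels that were created before the controller was started.
 */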

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/*
	 * cpdma_ctlr_stop() and cpdma_chan_destroy() take ctlr->lock
	 * themselves, so it must not be held across these calls.
	 */
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
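
/*
 * Create a channel.  chan_num encodes both direction and index: tx and rx
 * channels share a linear index (chan_num % CPDMA_MAX_CHANNELS) that picks
 * the per-channel hdp/cp/rxfree register slot, while is_rx_chan() (see
 * davinci_cpdma.h) selects between the two halves of ctlr->channels[].
 */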

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr = ctlr;
	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;
	} else {
		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
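
/*
 * Chain a descriptor onto a channel's queue with chan->lock held.  An idle
 * queue simply gets a new head and the head-descriptor pointer is written.
 * Otherwise the descriptor is linked after the current tail; if the
 * hardware has meanwhile reached end-of-queue (EOQ set and the descriptor
 * no longer owned by hardware) the new link was missed, so the queue is
 * restarted from the new descriptor and a misqueue is counted.
 */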

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_token, token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
				pool->num_desc, pool->num_desc/2, 1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}
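
/*
 * Dequeue and complete one descriptor.  Returns -ENOENT if the queue is
 * empty or -EBUSY if the head descriptor is still owned by the hardware;
 * otherwise the descriptor is acknowledged through the completion pointer,
 * the queue is restarted on EOQ, and the buffer is unmapped and handed to
 * the channel handler with chan->lock released.
 */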

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
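
/*
 * Field layout for the extended control registers (DMACONTROL, DMASTATUS,
 * RXBUFFOFS).  cpdma_control_get() and cpdma_control_set() use this table
 * to bounds-check the control id, pick the register and mask/shift the
 * field, subject to the ACCESS_* flags.
 */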

struct cpdma_control_info {
	u32 reg;
	u32 shift, mask;
	int access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");