/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384
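/*
 * Only the first four words of a descriptor (the hw_* fields) are
 * read by the DMA engine; the sw_* words are host-side bookkeeping
 * used to unmap and complete the buffer when the descriptor is
 * reaped.
 */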
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
	int			num_rx_desc; /* RX descriptors number */
	int			num_tx_desc; /* TX descriptors number */
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
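/*
 * Tx and rx channels share a single array in the controller: tx
 * channels occupy slots [0, CPDMA_MAX_CHANNELS) and rx channels
 * occupy slots [CPDMA_MAX_CHANNELS, 2 * CPDMA_MAX_CHANNELS), as
 * encoded by the macros below.
 */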
#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size = cpdma_params->desc_mem_size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc = pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* recalculate the memory size required for the cpdma
		 * descriptor pool based on the number of descriptors
		 * specified by the user; if that size exceeds the CPPI
		 * internal RAM size (desc_mem_size), switch to DDR
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}
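/*
 * Address bookkeeping: pool->iomap is the CPU-side base of the
 * descriptor area, pool->hw_addr is the bus address the DMA engine
 * uses, and pool->phys is what was handed to gen_pool_add_virt().
 * The two helpers below convert between CPU pointers and bus
 * addresses by offsetting against these bases.
 */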
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc -
	       (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
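/*
 * The underscore-prefixed control helpers below assume the caller
 * already holds ctlr->lock; the locked wrappers cpdma_control_get()
 * and cpdma_control_set() live at the bottom of this file.
 */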
static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL if the rate-limited channels are not
 * the first consecutive tx channels.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 1;
	int i;

	*prio_mode = 0;
	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan) {
			rlim = 0;
			continue;
		}

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			if (rlim) {
				new_rmask |= chan->mask;
			} else {
				ch->rate = old_rate;
				dev_err(ctlr->dev,
					"Prev channel of %dch is not rate limited\n",
					chan->chan_num);
				return -EINVAL;
			}
		} else {
			*prio_mode = 1;
			rlim = 0;
		}
	}

	*rmask = new_rmask;
	return 0;
}
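/*
 * The shaper for a channel is programmed via rate_factor, which packs
 * a send count in the low 16 bits and an idle count in the high 16
 * bits.  With freq = bus_freq_mhz * 1000 * 32 (Kb/s units), the
 * modeled rate is freq * idle_cnt / (idle_cnt + send_cnt); the search
 * loop below picks the (send_cnt, idle_cnt) pair whose resulting rate
 * is closest to, and not below, the requested rate.
 */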
static int cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 dividend, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		dividend = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		dividend = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		dividend = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}
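/*
 * The caller fills struct cpdma_params (register bases, channel
 * count, descriptor memory geometry) before calling
 * cpdma_ctlr_create(); the field list is in davinci_cpdma.h.
 */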
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;
	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* off prio mode if all tx channels are rate limited */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
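/*
 * Descriptor budgeting: a channel with a non-zero weight gets
 * weight% of the per-direction descriptor budget; channels with no
 * weight share the remainder equally (per_ch_desc).  Any rounding
 * leftover goes to the channel that already holds the most
 * descriptors.
 */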
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* use remains */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}

/**
 * cpdma_chan_split_pool - Splits ctrl pool between all channels.
 * Has to be called under ctlr lock
 */
int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);

/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights, i.e. 100% for Rx and
 * 100% for Tx.  The weight is used to split cpdma resources in the
 * correct proportion required by the channels, including the number
 * of descriptors.  The channel rate is not enough to know the weight
 * of a channel as the maximum rate of an interface is needed.
 * If weight = 0, the channel uses the rest of the descriptors left
 * over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int dividend, divisor;

	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(dividend, divisor);
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
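/*
 * Typical shaper usage (illustrative sketch only; "ch" is a tx
 * channel and "rate" is in Kb/s):
 *
 *	if (rate && rate < cpdma_chan_get_min_rate(ctlr))
 *		return -EINVAL;
 *	ret = cpdma_chan_set_rate(ch, rate);	// 0 switches the shaper off
 */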
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth-limited channels have to be in order beginning from lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s, if 0 - then off shaper
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* on shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr = ctlr;
	chan->state = CPDMA_STATE_IDLE;
	chan->chan_num = chan_num;
	chan->handler = handler;
	chan->rate = 0;
	chan->weight = 0;

	if (is_rx_chan(chan)) {
		chan->hdp = ctlr->params.rxhdp + offset;
		chan->cp = ctlr->params.rxcp + offset;
		chan->rxfree = ctlr->params.rxfree + offset;
		chan->int_set = CPDMA_RXINTMASKSET;
		chan->int_clear = CPDMA_RXINTMASKCLEAR;
		chan->td = CPDMA_RXTEARDOWN;
		chan->dir = DMA_FROM_DEVICE;
	} else {
		chan->hdp = ctlr->params.txhdp + offset;
		chan->cp = ctlr->params.txcp + offset;
		chan->int_set = CPDMA_TXINTMASKSET;
		chan->int_clear = CPDMA_TXINTMASKCLEAR;
		chan->td = CPDMA_TXTEARDOWN;
		chan->dir = DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
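/*
 * Typical consumer call flow (illustrative only, error handling
 * omitted):
 *
 *	ctlr = cpdma_ctlr_create(&params);
 *	txch = cpdma_chan_create(ctlr, 0, tx_handler, 0);
 *	rxch = cpdma_chan_create(ctlr, 0, rx_handler, 1);
 *	cpdma_ctlr_start(ctlr);
 *	cpdma_chan_submit(rxch, token, buf, len, 0);
 */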
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
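/*
 * cpdma_chan_submit - map a buffer and queue it on a channel.
 * Takes chan->lock with interrupts disabled, so it is safe to call
 * from atomic context; for rx channels the free-buffer count is
 * bumped after the descriptor is chained.
 */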
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	/* Relaxed IO accessors can be used here as there is a read
	 * barrier at the end of the write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(len, &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
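/*
 * cpdma_chan_process - reap up to "quota" completed descriptors,
 * invoking the channel handler for each.  Returns the number
 * processed, which makes it a natural fit for a NAPI poll loop.
 */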
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
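/*
 * Teardown protocol: request teardown via the td register, poll the
 * completion pointer until the hardware reports CPDMA_TEARDOWN_VALUE,
 * ack it, drain the descriptors the hardware already completed, then
 * release anything still queued with -ENOSYS status so callers can
 * tell those buffers were never transferred.
 */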
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);

void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);

MODULE_LICENSE("GPL");