/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp    = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
	uint32_t cpp;		/* bytes/chars per pixel */
	uint32_t slot_w;	/* width of each slot (in pixels) */
	uint32_t slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};


/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}
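/*
 * A sketch of what one engine's refill buffer looks like after a
 * single-region transaction (offsets are illustrative; the only hard
 * requirement, enforced above, is 16-byte alignment of each chunk):
 *
 *	refill_va/refill_pa
 *	+0x00: struct pat	(descriptor: area, ctrl, data_pa, next_pa)
 *	+0x..: uint32_t data[]	(one physical page address per slot)
 *	...			(further pat/data pairs for chained regions)
 *
 * dmm_txn_append() below carves both chunks out of this buffer via
 * alloc_dma() and links successive descriptors through next_pa.
 */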
/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
	struct dmm *dmm = engine->dmm;
	uint32_t r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err)
			return -EFAULT;

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0)
			return -ETIMEDOUT;

		udelay(1);
	}

	return 0;
}

static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	writel(status, dmm->base + DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}

/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;

	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}

/**
 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, uint32_t npages, uint32_t roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	uint32_t *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns * rows;

	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (uint32_t)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
		.start = 1,
		.lut_id = engine->tcm->lut_id,
	};

	data = alloc_dma(txn, 4 * i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
}
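/*
 * Example of the 'roll' arithmetic above: slot i is programmed with page
 * (i + roll) % npages, so with npages = 4 and roll = 1 the slots receive
 * pages 1, 2, 3, 0.  A buffer can thus be rotated through its mapping
 * without reordering the page array (the pan/scroll case mentioned in
 * the IRQ-enable comment in omap_dmm_probe() below).
 */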
/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	writel(engine->refill_pa, dmm->base + reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
		}
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
			.x0 = slice.p0.x, .y0 = slice.p0.y,
			.x1 = slice.p1.x, .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}
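/*
 * Note on the slicing above: a 1D (TILFMT_PAGE) area that wraps across
 * container rows comes back from tcm_for_each_slice() as (at most) one
 * rectangle per row.  Each slice becomes its own PAT descriptor in the
 * chain, and 'roll' advances by the slice size so the page array stays
 * contiguous from one descriptor to the next.
 */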
/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
		uint16_t h, uint16_t align)
{
	struct tiler_block *block;
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	u32 slot_bytes;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			&block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

/* note: if you have pin'd pages, you should have already unpin'd first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}
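/*
 * Usage sketch for the reserve/pin API above (hypothetical caller; the
 * 'pages'/'npages' array is assumed to come from the caller, e.g. via
 * drm_gem_get_pages()):
 *
 *	struct tiler_block *block;
 *	int ret;
 *
 *	block = tiler_reserve_2d(TILFMT_32BIT, width, height, 0);
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 *
 *	ret = tiler_pin(block, pages, npages, 0, true);
 *	if (ret) {
 *		tiler_release(block);
 *		return ret;
 *	}
 *	...
 *	tiler_unpin(block);
 *	tiler_release(block);
 */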
/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate (x and y are unsigned, so only check upper
	 * bounds) */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x > x_mask || y > y_mask) {
		DBG("invalid coords: %u > %u || %u > %u",
				x, x_mask, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}
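/*
 * Worked example (assuming CONT_WIDTH_BITS = 14 and CONT_HEIGHT_BITS = 13
 * from omap_dmm_priv.h): for TILFMT_16BIT (x_shft = 0, y_shft = 1) in a
 * 0-degree view, x_bits = 14 and y_bits = 12, so
 *
 *	tmp  = (y << 14) + x;
 *	addr = TIL_ADDR(tmp << 1, 0, TILFMT_16BIT);
 *
 * i.e. consecutive rows land 1 << 15 = 32768 bytes apart, which matches
 * tiler_stride() below for the non-flipped case.
 */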
dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
		uint32_t x, uint32_t y)
{
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}

size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

uint32_t tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}

static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_writecombine(omap_dmm->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				omap_dmm->refill_va,
				omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}
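/*
 * Note: omap_dmm_probe() below funnels its error path ('fail' label)
 * into omap_dmm_remove() above, which is why every teardown step there
 * is guarded by a check that the corresponding resource was actually
 * set up.
 */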
static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->base = ioremap(mem->start, SZ_2K);
	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}

	omap_dmm->dev = &dev->dev;

	hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
	writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
	writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);
	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				sizeof(struct refill_engine), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node,
				&omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	/* Each LUT is associated with a TCM (container manager).  The
	 * lut_id identifies the correct LUT for programming during refill
	 * operations. */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];
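	/*
	 * Worked example (numbers assumed, not read back from hardware):
	 * on OMAP5 the PAT geometry reports a LUT twice the height of the
	 * 256x128 container managed here, so the height check below fires
	 * and the extra container counted above (num_lut++) is dedicated
	 * to 1D/PAGE mode mappings in the upper half of the same LUT.
	 */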
	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode.  Programming must use
		 * a y offset that is added to all y coordinates.  LUT id
		 * is still 0, because it is the same LUT, just the upper
		 * 128 lines */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
	return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
							char c, bool ovw)
{
	int x, y;
	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
								char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
						struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
							256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
					a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
					0, a->p1.x - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
						struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
						a->p0.x, a->p1.x);
}
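/*
 * The dump rendered below is an ASCII map of each container, roughly
 * (illustrative output, not captured from hardware):
 *
 *	CONTAINER 0 DUMP BEGIN
 *	000:aaaa(320*240)aaaa       <=======128K=======>
 *	...
 *	CONTAINER 0 DUMP END
 *
 * 2D blocks are painted with characters from 'alphabet' and annotated
 * with their w*h by map_2d_info(); 1D blocks are drawn as '<===>' runs
 * annotated with their size in KB by map_1d_info() above.
 */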
int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;

	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
						&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n",
					i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};
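/*
 * Note: this driver struct is deliberately non-static; it is registered
 * by the core omapdrm module (omap_drv.c in mainline) rather than via
 * module_platform_driver() here.
 */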
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");