/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */


struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
	 * buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, the pages were DMA mapped at attach time, so
	 * unmap them here again (DSS, GPU, etc. are not cache coherent):
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k buffers, figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the page fault handling ourselves.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
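
/*
 * For reference, the typical userspace path for mapping one of these
 * buffers (an illustrative sketch, not code from this driver): query the
 * fake mmap offset for the handle, then mmap() the DRM fd at that offset,
 * which lands in omap_gem_mmap() / omap_gem_fault() above:
 *
 *	struct drm_omap_gem_info req = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &req);
 *	ptr = mmap(NULL, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, req.offset);
 */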

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - create an mmap offset for a dumb buffer
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
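
/*
 * How the two sync helpers above are meant to be used for OMAP_BO_CACHED
 * buffers (a sketch inferred from the code in this file, not a verbatim
 * caller): omap_gem_dma_sync() DMA-maps every page the CPU has dirtied
 * (tracked via faulting) and then zaps the userspace mapping, so the next
 * CPU access faults again and fault_1d()/omap_gem_cpu_sync() can hand the
 * page back to the CPU:
 *
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);	// before kicking DSS/DMA
 *	// ... hw accesses the buffer ...
 *	// a later CPU access faults -> omap_gem_cpu_sync() per page
 */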

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
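
/*
 * Typical pin/unpin usage of the above, e.g. from the framebuffer/plane
 * code when a buffer is about to be scanned out (illustrative sketch only,
 * error handling omitted):
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(bo, &paddr, true);	// pins in TILER if needed
 *	if (!ret) {
 *		// ... program DSS/DMA with paddr ...
 *		omap_gem_put_paddr(bo);			// drop pin when done
 *	}
 */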

/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
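
/*
 * A rotated scanout address is normally looked up by the framebuffer code
 * when a plane is configured with a rotated/mirrored orientation, roughly
 * like this (illustrative only; 'orient' is a combination of the tiler
 * x/y-invert and xy-flip mask bits):
 *
 *	dma_addr_t paddr;
 *
 *	if (!omap_gem_rotated_paddr(bo, orient, x, y, &paddr))
 *		; // program the overlay with the rotated address
 */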

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
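
/*
 * Rough usage sketch for the pages API above (illustrative, assuming a
 * process-context caller such as the dmabuf export path): with remap=true
 * the backing pages are attached on demand; with remap=false the call is
 * atomic-safe but only succeeds if the pages are already attached:
 *
 *	struct page **pages;
 *
 *	if (!omap_gem_get_pages(obj, &pages, true)) {
 *		// ... access/export the pages ...
 *		omap_gem_put_pages(obj);
 *	}
 */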

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
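
/*
 * A sketch of how the op_* API above fits together, loosely following the
 * OMAP_GEM_CPU_PREP / OMAP_GEM_CPU_FINI ioctl paths (illustrative only):
 * before the CPU touches a buffer the hw may still be using, wait for the
 * conflicting operations to complete, then mark the CPU access as pending;
 * mark it complete again afterwards so blocked waiters get kicked:
 *
 *	ret = omap_gem_op_sync(obj, OMAP_GEM_READ | OMAP_GEM_WRITE);
 *	if (!ret)
 *		ret = omap_gem_op_start(obj, OMAP_GEM_READ | OMAP_GEM_WRITE);
 *	// ... CPU reads/writes the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_READ | OMAP_GEM_WRITE);
 */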

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	list_del(&omap_obj->mm_list);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
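
/*
 * For reference, a sketch of how callers fill in union omap_gem_size for
 * the two flavours of buffer (illustrative only; error handling omitted):
 *
 *	union omap_gem_size gsize;
 *
 *	// untiled: size in bytes
 *	gsize.bytes = PAGE_ALIGN(len);
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *
 *	// tiled: dimensions in pixels, format selected via the flags
 *	gsize.tiled.width = width;
 *	gsize.tiled.height = height;
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
 */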

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem page backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	list_add(&omap_obj->mm_list, &priv->obj_list);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr)
			flags |= OMAP_BO_DMA;

	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}