/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * # of users of dma_addr
	 */
	uint32_t dma_addr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if the stride is larger than PAGE_SIZE this is a sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages?
	 */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
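	 *
	 * Illustrative example (numbers assumed, not taken from a specific
	 * format): with a slot height of 64 (n = 64, n_shift = 6) and a
	 * buffer two pages wide (m = 2 below), the faulting page offset is
	 * rounded down to a 128-page (m << n_shift) boundary and one
	 * slot-row worth of pages is pinned and mapped from there.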
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
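 *
 * Errors returned by the helpers below (e.g. -ENOMEM, -EBUSY) are translated
 * into the corresponding VM_FAULT_* codes before returning to the MM core.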
 */
int omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
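 *
 * The pitch is derived from the requested width and bpp (rounded up to a
 * whole number of bytes) and the total size is page-aligned before the
 * buffer is allocated.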
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: mmap offset returned to userspace
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;

		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. via
 * omap_gem_get_pages().
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				omap_gem_mmap_size(obj), 1);
	}
}

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill..
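			 *
			 * tiler_pin() maps the backing pages into the
			 * reserved tiler block, so the dma_addr recorded
			 * below points into the TILER aperture rather than
			 * at the pages themselves.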
			 */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);
		}

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);

	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);

	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;

	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);

		if (!omap_obj->pages)
			return -ENOMEM;

		*pages = omap_obj->pages;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);

		if (ret)
			return ERR_PTR(ret);

		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;

		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
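		 *
		 * The slot-aligned width/height are also kept in omap_obj so
		 * that later TILER reservations (omap_gem_pin) and mmap size
		 * calculations operate on the same aligned dimensions.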
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
				&gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}

struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);
			if (i > npages)
				break;
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
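/*
 * For each supported tiler format we reserve a small set of usergart slots
 * (NUM_USERGART_ENTRIES per format) that fault_2d() later borrows round-robin
 * to create page-aligned userspace mappings of 2d tiled buffers.
 */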
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				return;
			}
			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					&entry->dma_addr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}