/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
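/* To summarize the rules documented above (a restatement, no new
 * behavior implied): a buffer carved out of contiguous memory at
 * allocation time has OMAP_BO_DMA set and its paddr always valid,
 * while a shmem backed buffer pinned thru DMM/TILER has OMAP_BO_DMA
 * clear and a paddr that is only valid while paddr_cnt > 0, ie.
 * between omap_gem_get_paddr() and omap_gem_put_paddr().
 */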
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is greater than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!priv->usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
					&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;
	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
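/* Note: every caller of get_pages() in this file (the fault handler,
 * omap_gem_roll(), omap_gem_get_paddr(), omap_gem_get_pages() and
 * omap_gem_vaddr()) holds dev->struct_mutex across the call, so the
 * unsynchronized check of omap_obj->pages above is not racy.
 */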
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
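/* For illustration (assuming, per the comment above, that tiler_vsize()
 * works out to page-aligned-stride * height): a 1000 byte wide, 480 row
 * tiled buffer would mmap as 480 rows of 4KiB virtual stride, ie.
 * roughly 1920KiB of virtual size, even though far fewer backing pages
 * hold the valid picture part.
 */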
/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page.
	 * If the slot height is 64, then 64 pages fill a 4kb wide by
	 * 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}
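	/*
	 * Worked example of the stride math above (illustrative numbers,
	 * with 4k pages): if the buffer width in bytes is 6000, then
	 * m = 1 + 6000/4096 = 2, so each slot-row spans two virtual
	 * pages, and the m > 1 branch just picked which of those page
	 * columns this fault lands in.
	 */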
	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.
		 * But these are allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
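/* A quick sanity check of the arithmetic above (illustrative numbers):
 * for a 640x480 buffer at 32 bpp, and assuming align_pitch() returns at
 * least width * bytes-per-pixel, the pitch comes out as 2560 bytes and
 * args->size as PAGE_ALIGN(2560 * 480) = 1228800 bytes, ie. 300 pages.
 */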
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
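/* To restate how the two helpers above cooperate (a summary of existing
 * behavior, nothing new): for OMAP_BO_CACHED shmem buffers a non-zero
 * addrs[i] means the page is currently mapped for the device.
 * omap_gem_cpu_sync() hands a single page back to the CPU by unmapping
 * it, while omap_gem_dma_sync() (re)maps any CPU-dirtied pages for the
 * device and then zaps the userspace PTEs, so the next CPU touch faults
 * into omap_gem_fault() and re-syncs that page.
 */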
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
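/* Typical call pattern (a sketch of intended usage; error handling
 * trimmed, the real callers live elsewhere in the driver):
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... point DSS/DMA at paddr ...
 *	omap_gem_put_paddr(obj);	// when the hw is done with it
 */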
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.
 * If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};
/* list of omap_gem_sync_waiter..  the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}
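/* Usage sketch for the op-tracking API above (hypothetical caller, not
 * code from this driver): a writer brackets its DMA with
 * omap_gem_op_start(obj, OMAP_GEM_WRITE) and
 * omap_gem_op_finish(obj, OMAP_GEM_WRITE), and a CPU reader then calls
 * omap_gem_op_sync(obj, OMAP_GEM_READ), which blocks until the
 * write_complete count catches up with the writes that were pending
 * when the reader arrived.
 */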
12188bb0daffSRob Clark /* call fxn(arg), either synchronously or asynchronously if the op
12198bb0daffSRob Clark  * is currently blocked.. fxn() can be called from any context
12208bb0daffSRob Clark  *
12218bb0daffSRob Clark  * (TODO for now fxn is called back from whichever context calls
12228bb0daffSRob Clark  * omap_gem_op_update().. but this could be better defined later
12238bb0daffSRob Clark  * if needed)
12248bb0daffSRob Clark  *
12258bb0daffSRob Clark  * TODO more code in common w/ _sync()..
12268bb0daffSRob Clark  */
12278bb0daffSRob Clark int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
12288bb0daffSRob Clark 		void (*fxn)(void *arg), void *arg)
12298bb0daffSRob Clark {
12308bb0daffSRob Clark 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
12318bb0daffSRob Clark 	if (omap_obj->sync) {
12328bb0daffSRob Clark 		struct omap_gem_sync_waiter *waiter =
12338bb0daffSRob Clark 				kzalloc(sizeof(*waiter), GFP_ATOMIC);
12348bb0daffSRob Clark
12358bb0daffSRob Clark 		if (!waiter)
12368bb0daffSRob Clark 			return -ENOMEM;
12378bb0daffSRob Clark
12388bb0daffSRob Clark 		waiter->omap_obj = omap_obj;
12398bb0daffSRob Clark 		waiter->op = op;
12408bb0daffSRob Clark 		waiter->read_target = omap_obj->sync->read_pending;
12418bb0daffSRob Clark 		waiter->write_target = omap_obj->sync->write_pending;
12428bb0daffSRob Clark 		waiter->notify = fxn;
12438bb0daffSRob Clark 		waiter->arg = arg;
12448bb0daffSRob Clark
12458bb0daffSRob Clark 		spin_lock(&sync_lock);
12468bb0daffSRob Clark 		if (is_waiting(waiter)) {
12478bb0daffSRob Clark 			SYNC("waited: %p", waiter);
12488bb0daffSRob Clark 			list_add_tail(&waiter->list, &waiters);
12498bb0daffSRob Clark 			spin_unlock(&sync_lock);
12508bb0daffSRob Clark 			return 0;
12518bb0daffSRob Clark 		}
12528bb0daffSRob Clark
12538bb0daffSRob Clark 		spin_unlock(&sync_lock);
125415ec2ca9SSubhajit Paul
125515ec2ca9SSubhajit Paul 		kfree(waiter);
12568bb0daffSRob Clark 	}
12578bb0daffSRob Clark
12588bb0daffSRob Clark 	/* no waiting.. */
12598bb0daffSRob Clark 	fxn(arg);
12608bb0daffSRob Clark
12618bb0daffSRob Clark 	return 0;
12628bb0daffSRob Clark }
12638bb0daffSRob Clark
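/* A minimal sketch (not compiled) of deferring work until a buffer goes
 * idle; the callback name and its argument are hypothetical. When no op
 * is pending, fxn runs immediately in the calling context; otherwise it
 * is invoked later from whichever context calls omap_gem_op_update():
 */
#if 0
/* hypothetical callback: */
static void post_flip(void *arg)
{
	/* ... buffer is idle, safe to unpin / flip here ... */
}

	/* from the code that wants the deferred work: */
	ret = omap_gem_op_async(obj, OMAP_GEM_READ | OMAP_GEM_WRITE,
			post_flip, arg);
#endif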
12648bb0daffSRob Clark /* special API so PVR can update the buffer to use a sync-object allocated
12658bb0daffSRob Clark  * from its sync-obj heap. Only used for a newly allocated (from PVR's
12668bb0daffSRob Clark  * perspective) sync-object, so we overwrite the new syncobj w/ values
12678bb0daffSRob Clark  * from the already allocated syncobj (if there is one)
12688bb0daffSRob Clark  */
12698bb0daffSRob Clark int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
12708bb0daffSRob Clark {
12718bb0daffSRob Clark 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
12728bb0daffSRob Clark 	int ret = 0;
12738bb0daffSRob Clark
12748bb0daffSRob Clark 	spin_lock(&sync_lock);
12758bb0daffSRob Clark
12768bb0daffSRob Clark 	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
12778bb0daffSRob Clark 		/* clearing a previously set syncobj */
12788bb0daffSRob Clark 		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
12798bb0daffSRob Clark 				GFP_ATOMIC);
12808bb0daffSRob Clark 		if (!syncobj) {
12818bb0daffSRob Clark 			ret = -ENOMEM;
12828bb0daffSRob Clark 			goto unlock;
12838bb0daffSRob Clark 		}
12848bb0daffSRob Clark 		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
12858bb0daffSRob Clark 		omap_obj->sync = syncobj;
12868bb0daffSRob Clark 	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
12878bb0daffSRob Clark 		/* replacing an existing syncobj */
12888bb0daffSRob Clark 		if (omap_obj->sync) {
12898bb0daffSRob Clark 			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
12908bb0daffSRob Clark 			kfree(omap_obj->sync);
12918bb0daffSRob Clark 		}
12928bb0daffSRob Clark 		omap_obj->flags |= OMAP_BO_EXT_SYNC;
12938bb0daffSRob Clark 		omap_obj->sync = syncobj;
12948bb0daffSRob Clark 	}
12958bb0daffSRob Clark
12968bb0daffSRob Clark unlock:
12978bb0daffSRob Clark 	spin_unlock(&sync_lock);
12988bb0daffSRob Clark 	return ret;
12998bb0daffSRob Clark }
13008bb0daffSRob Clark
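/* A minimal sketch (not compiled) of the expected handoff; the PVR-side
 * heap allocator named here is hypothetical, only the calls into this
 * file are real:
 */
#if 0
	/* move the counters into PVR's packed sync-obj heap: */
	ret = omap_gem_set_sync_object(obj, pvr_syncobj_alloc());
	/* ... and detach again before that heap goes away: */
	ret = omap_gem_set_sync_object(obj, NULL);
#endif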
13017ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
13027ef93b0aSLaurent Pinchart  * Constructor & Destructor
13037ef93b0aSLaurent Pinchart  */
13047ef93b0aSLaurent Pinchart
13058bb0daffSRob Clark /* don't call directly.. called from GEM core when it is time to actually
13068bb0daffSRob Clark  * free the object..
13078bb0daffSRob Clark  */
13088bb0daffSRob Clark void omap_gem_free_object(struct drm_gem_object *obj)
13098bb0daffSRob Clark {
13108bb0daffSRob Clark 	struct drm_device *dev = obj->dev;
131176c4055fSTomi Valkeinen 	struct omap_drm_private *priv = dev->dev_private;
13128bb0daffSRob Clark 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
13138bb0daffSRob Clark
13148bb0daffSRob Clark 	evict(obj);
13158bb0daffSRob Clark
13168bb0daffSRob Clark 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
13178bb0daffSRob Clark
131876c4055fSTomi Valkeinen 	spin_lock(&priv->list_lock);
13198bb0daffSRob Clark 	list_del(&omap_obj->mm_list);
132076c4055fSTomi Valkeinen 	spin_unlock(&priv->list_lock);
13218bb0daffSRob Clark
13228bb0daffSRob Clark 	/* this means the object is still pinned.. which really should
13238bb0daffSRob Clark 	 * not happen. I think..
13248bb0daffSRob Clark 	 */
13258bb0daffSRob Clark 	WARN_ON(omap_obj->paddr_cnt > 0);
13268bb0daffSRob Clark
13278bb0daffSRob Clark 	/* don't free externally allocated backing memory */
13288bb0daffSRob Clark 	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
13298bb0daffSRob Clark 		if (omap_obj->pages)
13308bb0daffSRob Clark 			omap_gem_detach_pages(obj);
13318bb0daffSRob Clark
13328bb0daffSRob Clark 		if (!is_shmem(obj)) {
13338bb0daffSRob Clark 			dma_free_writecombine(dev->dev, obj->size,
13348bb0daffSRob Clark 					omap_obj->vaddr, omap_obj->paddr);
13358bb0daffSRob Clark 		} else if (omap_obj->vaddr) {
13368bb0daffSRob Clark 			vunmap(omap_obj->vaddr);
13378bb0daffSRob Clark 		}
13388bb0daffSRob Clark 	}
13398bb0daffSRob Clark
13408bb0daffSRob Clark 	/* don't free externally allocated syncobj */
13418bb0daffSRob Clark 	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
13428bb0daffSRob Clark 		kfree(omap_obj->sync);
13438bb0daffSRob Clark
13448bb0daffSRob Clark 	drm_gem_object_release(obj);
13458bb0daffSRob Clark
134600e9c7c7SLaurent Pinchart 	kfree(omap_obj);
13478bb0daffSRob Clark }
13488bb0daffSRob Clark
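/* Freeing is normally driven by the GEM refcount rather than by calling
 * omap_gem_free_object() directly; a minimal sketch (not compiled),
 * matching the struct_mutex convention that the WARN_ON above checks:
 */
#if 0
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);	/* last ref frees the object */
	mutex_unlock(&dev->struct_mutex);
#endif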
13498bb0daffSRob Clark /* GEM buffer object constructor */
13508bb0daffSRob Clark struct drm_gem_object *omap_gem_new(struct drm_device *dev,
13518bb0daffSRob Clark 		union omap_gem_size gsize, uint32_t flags)
13528bb0daffSRob Clark {
13538bb0daffSRob Clark 	struct omap_drm_private *priv = dev->dev_private;
13548bb0daffSRob Clark 	struct omap_gem_object *omap_obj;
135592b4b445SLaurent Pinchart 	struct drm_gem_object *obj;
1356ab5a60c3SDavid Herrmann 	struct address_space *mapping;
13578bb0daffSRob Clark 	size_t size;
13588bb0daffSRob Clark 	int ret;
13598bb0daffSRob Clark
13608bb0daffSRob Clark 	if (flags & OMAP_BO_TILED) {
1361f4302747SLaurent Pinchart 		if (!priv->usergart) {
13628bb0daffSRob Clark 			dev_err(dev->dev, "Tiled buffers require DMM\n");
136392b4b445SLaurent Pinchart 			return NULL;
13648bb0daffSRob Clark 		}
13658bb0daffSRob Clark
13668bb0daffSRob Clark 		/* tiled buffers are always shmem paged backed.. when they are
13678bb0daffSRob Clark 		 * scanned out, they are remapped into DMM/TILER
13688bb0daffSRob Clark 		 */
13698bb0daffSRob Clark 		flags &= ~OMAP_BO_SCANOUT;
13708bb0daffSRob Clark
13718bb0daffSRob Clark 		/* currently don't allow cached buffers.. there is some caching
13728bb0daffSRob Clark 		 * stuff that needs to be handled better
13738bb0daffSRob Clark 		 */
13747cb0d6c1STomi Valkeinen 		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
13757cb0d6c1STomi Valkeinen 		flags |= tiler_get_cpu_cache_flags();
13768bb0daffSRob Clark
13778bb0daffSRob Clark 		/* align dimensions to slot boundaries... */
13788bb0daffSRob Clark 		tiler_align(gem2fmt(flags),
13798bb0daffSRob Clark 				&gsize.tiled.width, &gsize.tiled.height);
13808bb0daffSRob Clark
13818bb0daffSRob Clark 		/* ...and calculate size based on aligned dimensions */
13828bb0daffSRob Clark 		size = tiler_size(gem2fmt(flags),
13838bb0daffSRob Clark 				gsize.tiled.width, gsize.tiled.height);
13848bb0daffSRob Clark 	} else {
13858bb0daffSRob Clark 		size = PAGE_ALIGN(gsize.bytes);
13868bb0daffSRob Clark 	}
13878bb0daffSRob Clark
13888bb0daffSRob Clark 	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1389fffddfd6SLinus Torvalds 	if (!omap_obj)
1390a903e3b6STomi Valkeinen 		return NULL;
13918bb0daffSRob Clark
13928bb0daffSRob Clark 	obj = &omap_obj->base;
13938bb0daffSRob Clark
13948bb0daffSRob Clark 	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
13958bb0daffSRob Clark 		/* attempt to allocate contiguous memory if we don't
13968bb0daffSRob Clark 		 * have DMM for remapping discontiguous buffers
13978bb0daffSRob Clark 		 */
13988bb0daffSRob Clark 		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
13998bb0daffSRob Clark 				&omap_obj->paddr, GFP_KERNEL);
1400a903e3b6STomi Valkeinen 		if (!omap_obj->vaddr) {
1401a903e3b6STomi Valkeinen 			kfree(omap_obj);
14028bb0daffSRob Clark
1403a903e3b6STomi Valkeinen 			return NULL;
14048bb0daffSRob Clark 		}
14058bb0daffSRob Clark
1406a903e3b6STomi Valkeinen 		flags |= OMAP_BO_DMA;
1407a903e3b6STomi Valkeinen 	}
1408a903e3b6STomi Valkeinen
1409a903e3b6STomi Valkeinen 	spin_lock(&priv->list_lock);
1410a903e3b6STomi Valkeinen 	list_add(&omap_obj->mm_list, &priv->obj_list);
1411a903e3b6STomi Valkeinen 	spin_unlock(&priv->list_lock);
1412a903e3b6STomi Valkeinen
14138bb0daffSRob Clark 	omap_obj->flags = flags;
14148bb0daffSRob Clark
14158bb0daffSRob Clark 	if (flags & OMAP_BO_TILED) {
14168bb0daffSRob Clark 		omap_obj->width = gsize.tiled.width;
14178bb0daffSRob Clark 		omap_obj->height = gsize.tiled.height;
14188bb0daffSRob Clark 	}
14198bb0daffSRob Clark
1420ab5a60c3SDavid Herrmann 	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
142189c8233fSDavid Herrmann 		drm_gem_private_object_init(dev, obj, size);
1422ab5a60c3SDavid Herrmann 	} else {
14238bb0daffSRob Clark 		ret = drm_gem_object_init(dev, obj, size);
14248bb0daffSRob Clark 		if (ret)
14258bb0daffSRob Clark 			goto fail;
14268bb0daffSRob Clark
1427ab5a60c3SDavid Herrmann 		mapping = file_inode(obj->filp)->i_mapping;
1428ab5a60c3SDavid Herrmann 		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1429ab5a60c3SDavid Herrmann 	}
1430ab5a60c3SDavid Herrmann
14318bb0daffSRob Clark 	return obj;
14328bb0daffSRob Clark
14338bb0daffSRob Clark fail:
14348bb0daffSRob Clark 	omap_gem_free_object(obj);
14358bb0daffSRob Clark 	return NULL;
14368bb0daffSRob Clark }
14378bb0daffSRob Clark
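/* A minimal sketch (not compiled) of constructing both flavours of BO;
 * the dimensions and error handling are placeholders:
 */
#if 0
	union omap_gem_size gsize;

	/* linear buffer, physically contiguous if there is no DMM: */
	gsize.bytes = PAGE_ALIGN(width * height * 4);
	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);

	/* 2d tiled buffer, 16bpp, dimensions in pixels: */
	gsize.tiled.width = width;
	gsize.tiled.height = height;
	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
#endif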
14387ef93b0aSLaurent Pinchart /* convenience method to construct a GEM buffer object, and userspace handle */
14397ef93b0aSLaurent Pinchart int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
14407ef93b0aSLaurent Pinchart 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
14417ef93b0aSLaurent Pinchart {
14427ef93b0aSLaurent Pinchart 	struct drm_gem_object *obj;
14437ef93b0aSLaurent Pinchart 	int ret;
14447ef93b0aSLaurent Pinchart
14457ef93b0aSLaurent Pinchart 	obj = omap_gem_new(dev, gsize, flags);
14467ef93b0aSLaurent Pinchart 	if (!obj)
14477ef93b0aSLaurent Pinchart 		return -ENOMEM;
14487ef93b0aSLaurent Pinchart
14497ef93b0aSLaurent Pinchart 	ret = drm_gem_handle_create(file, obj, handle);
14507ef93b0aSLaurent Pinchart 	if (ret) {
145174128a23SLaurent Pinchart 		omap_gem_free_object(obj);
14527ef93b0aSLaurent Pinchart 		return ret;
14537ef93b0aSLaurent Pinchart 	}
14547ef93b0aSLaurent Pinchart
14557ef93b0aSLaurent Pinchart 	/* drop reference from allocate - handle holds it now */
14567ef93b0aSLaurent Pinchart 	drm_gem_object_unreference_unlocked(obj);
14577ef93b0aSLaurent Pinchart
14587ef93b0aSLaurent Pinchart 	return 0;
14597ef93b0aSLaurent Pinchart }
14607ef93b0aSLaurent Pinchart
14617ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
14627ef93b0aSLaurent Pinchart  * Init & Cleanup
14637ef93b0aSLaurent Pinchart  */
14647ef93b0aSLaurent Pinchart
14657ef93b0aSLaurent Pinchart /* If DMM is used, we need to set some stuff up.. */
14668bb0daffSRob Clark void omap_gem_init(struct drm_device *dev)
14678bb0daffSRob Clark {
14688bb0daffSRob Clark 	struct omap_drm_private *priv = dev->dev_private;
1469f4302747SLaurent Pinchart 	struct omap_drm_usergart *usergart;
14708bb0daffSRob Clark 	const enum tiler_fmt fmts[] = {
14718bb0daffSRob Clark 			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
14728bb0daffSRob Clark 	};
14738bb0daffSRob Clark 	int i, j;
14748bb0daffSRob Clark
14758bb0daffSRob Clark 	if (!dmm_is_available()) {
14768bb0daffSRob Clark 		/* DMM only supported on OMAP4 and later, so this isn't fatal */
14778bb0daffSRob Clark 		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
14788bb0daffSRob Clark 		return;
14798bb0daffSRob Clark 	}
14808bb0daffSRob Clark
1481fffddfd6SLinus Torvalds 	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1482fffddfd6SLinus Torvalds 	if (!usergart)
14838bb0daffSRob Clark 		return;
14848bb0daffSRob Clark
14858bb0daffSRob Clark 	/* reserve 4k aligned/wide regions for userspace mappings: */
14868bb0daffSRob Clark 	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
14878bb0daffSRob Clark 		uint16_t h = 1, w = PAGE_SIZE >> i;
14888bb0daffSRob Clark 		tiler_align(fmts[i], &w, &h);
14898bb0daffSRob Clark 		/* note: since each region is 1 4kb page wide, and minimum
14908bb0daffSRob Clark 		 * number of rows, the height ends up being the same as the
14918bb0daffSRob Clark 		 * # of pages in the region
14928bb0daffSRob Clark 		 */
14938bb0daffSRob Clark 		usergart[i].height = h;
14948bb0daffSRob Clark 		usergart[i].height_shift = ilog2(h);
14958bb0daffSRob Clark 		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
14968bb0daffSRob Clark 		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
14978bb0daffSRob Clark 		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1498f4302747SLaurent Pinchart 			struct omap_drm_usergart_entry *entry;
1499f4302747SLaurent Pinchart 			struct tiler_block *block;
1500f4302747SLaurent Pinchart
1501f4302747SLaurent Pinchart 			entry = &usergart[i].entry[j];
1502f4302747SLaurent Pinchart 			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
15038bb0daffSRob Clark 			if (IS_ERR(block)) {
15048bb0daffSRob Clark 				dev_err(dev->dev,
15058bb0daffSRob Clark 						"reserve failed: %d, %d, %ld\n",
15068bb0daffSRob Clark 						i, j, PTR_ERR(block));
15078bb0daffSRob Clark 				return;
15088bb0daffSRob Clark 			}
15098bb0daffSRob Clark 			entry->paddr = tiler_ssptr(block);
15108bb0daffSRob Clark 			entry->block = block;
15118bb0daffSRob Clark
15122d31ca3aSRussell King 			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
15132d31ca3aSRussell King 				&entry->paddr,
15148bb0daffSRob Clark 				usergart[i].stride_pfn << PAGE_SHIFT);
15158bb0daffSRob Clark 		}
15168bb0daffSRob Clark 	}
15178bb0daffSRob Clark
1518f4302747SLaurent Pinchart 	priv->usergart = usergart;
15198bb0daffSRob Clark 	priv->has_dmm = true;
15208bb0daffSRob Clark }
15218bb0daffSRob Clark
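/* Worked example of the sizing above: fmts[i] uses a (1 << i) byte pixel,
 * so w = PAGE_SIZE >> i always describes a region exactly one 4KiB page
 * wide: 4096x1 pixels for TILFMT_8BIT, 2048x1 for TILFMT_16BIT and
 * 1024x1 for TILFMT_32BIT, before tiler_align() rounds the dimensions
 * up to that container's slot boundaries.
 */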
15228bb0daffSRob Clark void omap_gem_deinit(struct drm_device *dev)
15238bb0daffSRob Clark {
1524f4302747SLaurent Pinchart 	struct omap_drm_private *priv = dev->dev_private;
1525f4302747SLaurent Pinchart
15268bb0daffSRob Clark 	/* I believe we can rely on there being no more outstanding GEM
15278bb0daffSRob Clark 	 * objects which could depend on usergart/dmm at this point.
15288bb0daffSRob Clark 	 */
1529f4302747SLaurent Pinchart 	kfree(priv->usergart);
15308bb0daffSRob Clark }
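/* A minimal sketch (not compiled) of the expected lifecycle, assuming the
 * driver load/unload paths in omap_drv.c are the only call sites:
 */
#if 0
	/* at load, once dev->dev_private is set up: */
	omap_gem_init(dev);
	/* ... */
	/* at unload, after all GEM objects have been freed: */
	omap_gem_deinit(dev);
#endif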