/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * paddr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when paddr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
	 * the DMA address must be accessed through omap_gem_get_paddr() to
	 * ensure that the mapping won't disappear unexpectedly. References must
	 * be released with omap_gem_put_paddr().
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
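/*
 * Each 2D container format has its own struct omap_drm_usergart; a fault
 * on a tiled buffer claims one of its NUM_USERGART_ENTRIES blocks in
 * round-robin order (see fault_2d()), evicting whatever buffer was
 * previously mapped through that entry.
 */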
/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
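/*
 * Worked example for evict_entry() (illustrative numbers): with slot
 * height n = 64 and a buffer wider than one page, say m = 2, successive
 * rows of the usergart mapping sit two pages apart in the file offset
 * space, so 64 individual single-page ranges are unmapped, each
 * PAGE_SIZE * 2 beyond the previous one. With m = 1 the mapping is
 * dense and a single n-page range is unmapped instead.
 */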
/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
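/*
 * Note on the addrs[] array attached above: for OMAP_BO_WC/OMAP_BO_UNCACHED
 * buffers every page is DMA-mapped once at attach time and stays mapped
 * until omap_gem_detach_pages(). For cached buffers the array starts out
 * zeroed and entries are filled and torn down lazily by
 * omap_gem_dma_sync() and omap_gem_cpu_sync() to simulate coherency.
 */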
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
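/*
 * Illustrative example: for an 8-bit tiled buffer 700 pixels wide by 480
 * rows, each row holds well under 4kb of pixels, but the virtual stride
 * is rounded up to the page size, so the size reported above is
 * 480 * 4096 bytes even though only the valid picture part is backed by
 * pages.
 */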
/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!is_contiguous(omap_obj));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;

	return 0;
}
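/*
 * Worked example of the fault_2d() math above (illustrative numbers):
 * with slot height n = 64 (n_shift = 6) and a buffer at most one page
 * wide (m = 1), a fault at buffer page 200 rounds base_pgoff down to the
 * slot boundary 192, pins up to 64 pages starting there into the
 * reserved usergart block, and inserts the corresponding 64 pfns, so
 * neighbouring rows of the same slot fault in for free.
 */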
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* VM_MIXEDMAP lets the fault handlers insert both page-backed and
	 * raw-pfn mappings (vm_insert_mixed()); tiled buffers fault in
	 * TILER aperture addresses that have no struct page behind them.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
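/*
 * Illustrative userspace-side sketch (not driver code) of how the two
 * dumb-buffer entry points above are typically exercised; error handling
 * is omitted, "fd" is an open DRM device, and the usual <sys/ioctl.h>,
 * <sys/mman.h> and <drm/drm_mode.h> userspace headers are assumed:
 */
#if 0
static void *example_map_dumb_buffer(int fd)
{
	struct drm_mode_create_dumb creq = {
		.width = 1920, .height = 1080, .bpp = 32,
	};
	struct drm_mode_map_dumb mreq = { 0 };

	/* allocates the BO and a handle -> omap_gem_dumb_create() */
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);

	/* asks for the fake mmap offset -> omap_gem_dumb_map_offset() */
	mreq.handle = creq.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);

	/* faults are then serviced by omap_gem_fault() */
	return mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mreq.offset);
}
#endif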
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				dma_addr_t addr;

				addr = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);

				if (dma_mapping_error(dev->dev, addr)) {
					dev_warn(dev->dev,
						"%s: failed to map page\n",
						__func__);
					break;
				}

				dirty = true;
				omap_obj->addrs[i] = addr;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
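/*
 * Taken together, the two sync helpers above simulate coherency for
 * cached shmem buffers: a non-zero addrs[i] means "page i is mapped for
 * DMA, the CPU must not touch it".  omap_gem_cpu_sync() tears down one
 * page's DMA mapping before CPU access (called from the fault path),
 * while omap_gem_dma_sync() maps every unmapped page and then zaps the
 * userspace mappings, so the next CPU access refaults through
 * omap_gem_fault() -> omap_gem_cpu_sync().
 */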
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (is_contiguous(omap_obj)) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
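/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * pinning a non-contiguous buffer for DMA and dropping the pin once the
 * transfer is done; "my_run_dma" is a placeholder for kicking the engine
 * and waiting for completion.
 */
#if 0
static int example_dma_access(struct drm_gem_object *obj)
{
	dma_addr_t paddr;
	int ret;

	/* pins through the TILER if the buffer is not already contiguous */
	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		return ret;

	my_run_dma(paddr);	/* placeholder */

	/* release the reference; the TILER mapping may now be torn down */
	omap_gem_put_paddr(obj);
	return 0;
}
#endif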
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
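/*
 * Illustrative contrast of the two omap_gem_get_pages() modes
 * (hypothetical callers, not part of this file): remap=false never
 * sleeps and needs no matching put, which is what makes it usable from
 * atomic context.
 */
#if 0
	struct page **pages;

	/* atomic context: only succeeds if pages are already attached */
	if (omap_gem_get_pages(obj, &pages, false) == 0)
		/* ... use pages[] ... */;

	/* sleeping context: attaches shmem pages on demand */
	if (omap_gem_get_pages(obj, &pages, true) == 0) {
		/* ... use pages[] ... */
		omap_gem_put_pages(obj);
	}
#endif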
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_puts(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_puts(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};
/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* mark the start of a read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}
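/*
 * Usage sketch (hypothetical caller, not part of this file): bracketing
 * CPU reads of a buffer the hardware may still be writing.  The function
 * name is illustrative only; error unwinding is minimal.
 */
static inline int example_cpu_read_prep(struct drm_gem_object *obj)
{
	int ret;

	/* wait out all writers that were pending when we got here */
	ret = omap_gem_op_sync(obj, OMAP_GEM_READ);
	if (ret)
		return ret;

	/* mark the CPU as a reader so writers can in turn wait on us;
	 * the matching omap_gem_op_finish(obj, OMAP_GEM_READ) would be
	 * called once the CPU is done with the buffer
	 */
	return omap_gem_op_start(obj, OMAP_GEM_READ);
}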
/* call fxn(arg): synchronously if the op is not currently blocked, or
 * asynchronously once it unblocks.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_finish().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
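/*
 * Usage sketch for omap_gem_op_async() (hypothetical, for illustration):
 * defer work, e.g. a pageflip, until outstanding writes to the buffer
 * complete.  example_flip_worker() stands in for whatever the caller
 * wants run; both names are made up for the example.
 */
static void example_flip_worker(void *arg)
{
	struct drm_gem_object *obj = arg;

	/* runs immediately if obj is idle, otherwise later from the
	 * context that calls omap_gem_op_finish()
	 */
	(void)obj;
}

static inline int example_queue_flip(struct drm_gem_object *obj)
{
	/* READ: wait for writers; note the callback may already have run
	 * by the time this returns
	 */
	return omap_gem_op_async(obj, OMAP_GEM_READ,
			example_flip_worker, obj);
}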
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* A non-zero paddr_cnt means the object is still pinned, which
	 * should never happen at this point and would indicate a
	 * refcounting bug in a caller.
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->paddr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/*
		 * Tiled buffers are always backed by shmem pages. When they
		 * are scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Cached buffers are currently not allowed; the caching
		 * attributes need to be handled better before they can be
		 * supported.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->paddr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
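/*
 * Allocation sketch (illustrative only, not part of the driver): the
 * dimensions, sizes and flag combinations below are assumptions made up
 * for the example, and cleanup of the returned objects is elided.
 */
static inline void example_alloc(struct drm_device *dev)
{
	union omap_gem_size gsize;
	struct drm_gem_object *obj;

	/* a 1 MiB linear write-combined buffer, hinted for scanout */
	gsize.bytes = 1024 * 1024;
	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
	if (!obj)
		return;

	/* a 2d-tiled 16bpp buffer; width/height are in pixels and are
	 * rounded up to TILER slot boundaries internally
	 */
	gsize.tiled.width = 1920;
	gsize.tiled.height = 1080;
	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
	if (!obj)
		return;
}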
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	mutex_lock(&dev->struct_mutex);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj) {
		obj = ERR_PTR(-ENOMEM);
		goto done;
	}

	omap_obj = to_omap_bo(obj);
	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->paddr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;
		unsigned int i = 0;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			/* stop before writing past the end of the pages
			 * array if the sgt covers more pages than size
			 * implies
			 */
			if (i >= npages)
				break;
			pages[i++] = sg_page_iter_page(&iter);
		}

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&dev->struct_mutex);
	return obj;
}
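/*
 * Import sketch (hypothetical, for illustration): roughly what a
 * dma_buf/prime import hook would do with the scatterlist obtained from
 * dma_buf_map_attachment().  Assumes <linux/dma-buf.h> is available for
 * struct dma_buf; attachment handling and error paths are elided.
 */
static inline struct drm_gem_object *
example_import(struct drm_device *dev, struct dma_buf *dma_buf,
		struct sg_table *sgt)
{
	/* on success the new object takes ownership of sgt */
	return omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
}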
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is available, set up the usergart: small reserved TILER regions
 * used for userspace mmap of tiled buffers.
 */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is one 4kb page wide and uses the
		 * minimum number of rows, the aligned height equals the
		 * number of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));
				kfree(usergart);
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
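/*
 * Worked example of the usergart geometry above (illustrative, assuming
 * PAGE_SIZE == 4096): for TILFMT_8BIT (i == 0) a region is requested as
 * w = 4096, h = 1, i.e. one page worth of 8-bit texels per row; for
 * TILFMT_16BIT (i == 1) w = 2048 texels, which is again 4096 bytes.
 * tiler_align() then rounds w/h up to slot boundaries, and because each
 * region stays one page wide, the aligned height equals the number of
 * pages the region spans.
 */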
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}