// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000 /* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000 /* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000 /* memory imported from a dmabuf */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects pin_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when pin_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
	 * the DMA address must be accessed through omap_gem_pin() to ensure
	 * that the mapping won't disappear unexpectedly. References must be
	 * released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * Number of users holding a pin on the buffer.
	 */
	refcount_t pin_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;			/* height in rows */
	int height_shift;		/* ilog2(height in rows) */
	int slot_shift;			/* ilog2(width per slot) */
	int stride_pfn;			/* stride in pages */
	int last;			/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
		return true;

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
		return true;

	return false;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;
	unsigned int i;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
				       PAGE_SIZE, DMA_TO_DEVICE);
	}

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];	/* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placeholder
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
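
/*
 * Example (illustrative sketch, not part of the driver): from userspace the
 * two entry points above are reached through the generic DRM dumb-buffer
 * ioctls. Error handling is elided and the variable names are hypothetical:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */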

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))
		return;

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		unmap_mapping_range(obj->filp->f_mapping, 0,
				    omap_gem_mmap_size(obj), 1);
	}
}
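
/*
 * Example (illustrative sketch, not part of the driver): a typical flow for a
 * cached buffer is to get the backing pages, let the CPU write to them, and
 * then make the buffer visible to a DMA engine before kicking it off. The
 * calling context and names below are hypothetical:
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... CPU fills the buffer through a mapping of the pages ...
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 *	... start the DMA ...
 *	omap_gem_put_pages(obj);
 */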

static int omap_gem_pin_tiler(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct tiler_block *block;
	int ret;

	BUG_ON(omap_obj->block);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
					 PAGE_SIZE);
	} else {
		block = tiler_reserve_1d(obj->size);
	}

	if (IS_ERR(block)) {
		ret = PTR_ERR(block);
		dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
		goto fail;
	}

	/* TODO: enable async refill.. */
	ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
	if (ret) {
		tiler_release(block);
		dev_err(obj->dev->dev, "could not pin: %d\n", ret);
		goto fail;
	}

	omap_obj->dma_addr = tiler_ssptr(block);
	omap_obj->block = block;

	DBG("got dma address: %pad", &omap_obj->dma_addr);

fail:
	return ret;
}

/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj)) {
		if (refcount_read(&omap_obj->pin_cnt) == 0) {
			refcount_set(&omap_obj->pin_cnt, 1);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			if (priv->has_dmm) {
				ret = omap_gem_pin_tiler(obj);
				if (ret)
					goto fail;
			}
		} else {
			refcount_inc(&omap_obj->pin_cnt);
		}
	}

	if (dma_addr)
		*dma_addr = omap_obj->dma_addr;

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/**
 * omap_gem_unpin_locked() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * omap_gem_unpin() without locking.
 */
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	if (omap_gem_is_contiguous(omap_obj))
		return;

	if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
		if (omap_obj->sgt) {
			sg_free_table(omap_obj->sgt);
			kfree(omap_obj->sgt);
			omap_obj->sgt = NULL;
		}
		if (priv->has_dmm) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}
}

/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);
	omap_gem_unpin_locked(obj);
	mutex_unlock(&omap_obj->lock);
}
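
/*
 * Example (illustrative sketch, not part of the driver): a scanout-style
 * caller pins the buffer for as long as the hardware needs the DMA address
 * and drops the pin afterwards. Calls must be balanced; the surrounding code
 * is hypothetical:
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program the display/DMA hardware with dma_addr ...
 *	omap_gem_unpin(obj);
 */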

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&omap_obj->lock);

	if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED_MASK)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);

	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). If !remap, a matching
 * omap_gem_put_pages() call is not required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	if (remap) {
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto unlock;
	}

	if (!omap_obj->pages) {
		ret = -ENOMEM;
		goto unlock;
	}

	*pages = omap_obj->pages;

unlock:
	mutex_unlock(&omap_obj->lock);

	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	dma_addr_t addr;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int count, len, stride, i;
	int ret;

	ret = omap_gem_pin(obj, &addr);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&omap_obj->lock);

	sgt = omap_obj->sgt;
	if (sgt)
		goto out;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	if (addr) {
		if (omap_obj->flags & OMAP_BO_TILED_MASK) {
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);

			len = omap_obj->width << (int)fmt;
			count = omap_obj->height;
			stride = tiler_stride(fmt, 0);
		} else {
			len = obj->size;
			count = 1;
			stride = 0;
		}
	} else {
		count = obj->size >> PAGE_SHIFT;
	}

	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
	if (ret)
		goto err_free;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	if (addr) {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, phys_to_page(addr), len,
				offset_in_page(addr));
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = len;

			addr += stride;
		}
	} else {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sg) = omap_obj->dma_addrs[i];
			sg_dma_len(sg) = PAGE_SIZE;
		}
	}

	omap_obj->sgt = sgt;
out:
	mutex_unlock(&omap_obj->lock);
	return sgt;

err_free:
	kfree(sgt);
err_unpin:
	mutex_unlock(&omap_obj->lock);
	omap_gem_unpin(obj);
	return ERR_PTR(ret);
}

void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (WARN_ON(omap_obj->sgt != sgt))
		return;

	omap_gem_unpin(obj);
}
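
/*
 * Example (illustrative sketch, not part of the driver): an exporter-style
 * user builds a scatterlist for device access and releases it when done. The
 * surrounding code is hypothetical:
 *
 *	struct sg_table *sgt = omap_gem_get_sg(obj, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	... hand sgt to the importing device ...
 *	omap_gem_put_sg(obj, sgt);
 */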

#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);	/* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
#endif
mutex_lock(&omap_obj->lock); 11473cbd0c58SLaurent Pinchart 11482d31ca3aSRussell King seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", 11492c935bc5SPeter Zijlstra omap_obj->flags, obj->name, kref_read(&obj->refcount), 1150cec4fa75SJean-Jacques Hiblot off, &omap_obj->dma_addr, 1151*1948d28dSIvaylo Dimitrov refcount_read(&omap_obj->pin_cnt), 11528bb0daffSRob Clark omap_obj->vaddr, omap_obj->roll); 11538bb0daffSRob Clark 115448b34ac0STomi Valkeinen if (omap_obj->flags & OMAP_BO_TILED_MASK) { 11558bb0daffSRob Clark seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height); 11568bb0daffSRob Clark if (omap_obj->block) { 11578bb0daffSRob Clark struct tcm_area *area = &omap_obj->block->area; 11588bb0daffSRob Clark seq_printf(m, " (%dx%d, %dx%d)", 11598bb0daffSRob Clark area->p0.x, area->p0.y, 11608bb0daffSRob Clark area->p1.x, area->p1.y); 11618bb0daffSRob Clark } 11628bb0daffSRob Clark } else { 11632150c19bSTomi Valkeinen seq_printf(m, " %zu", obj->size); 11648bb0daffSRob Clark } 11658bb0daffSRob Clark 11663cbd0c58SLaurent Pinchart mutex_unlock(&omap_obj->lock); 11673cbd0c58SLaurent Pinchart 11688bb0daffSRob Clark seq_printf(m, "\n"); 11698bb0daffSRob Clark } 11708bb0daffSRob Clark 11718bb0daffSRob Clark void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) 11728bb0daffSRob Clark { 11738bb0daffSRob Clark struct omap_gem_object *omap_obj; 11748bb0daffSRob Clark int count = 0; 11758bb0daffSRob Clark size_t size = 0; 11768bb0daffSRob Clark 11778bb0daffSRob Clark list_for_each_entry(omap_obj, list, mm_list) { 11788bb0daffSRob Clark struct drm_gem_object *obj = &omap_obj->base; 11798bb0daffSRob Clark seq_printf(m, " "); 11808bb0daffSRob Clark omap_gem_describe(obj, m); 11818bb0daffSRob Clark count++; 11828bb0daffSRob Clark size += obj->size; 11838bb0daffSRob Clark } 11848bb0daffSRob Clark 11858bb0daffSRob Clark seq_printf(m, "Total %d objects, %zu bytes\n", count, size); 11868bb0daffSRob Clark } 11878bb0daffSRob Clark #endif 11888bb0daffSRob Clark 11897ef93b0aSLaurent Pinchart /* ----------------------------------------------------------------------------- 11907ef93b0aSLaurent Pinchart * Constructor & Destructor 11917ef93b0aSLaurent Pinchart */ 11927ef93b0aSLaurent Pinchart 1193c5ca5e02SThomas Zimmermann static void omap_gem_free_object(struct drm_gem_object *obj) 11948bb0daffSRob Clark { 11958bb0daffSRob Clark struct drm_device *dev = obj->dev; 119676c4055fSTomi Valkeinen struct omap_drm_private *priv = dev->dev_private; 11978bb0daffSRob Clark struct omap_gem_object *omap_obj = to_omap_bo(obj); 11988bb0daffSRob Clark 1199620063e1SLaurent Pinchart omap_gem_evict(obj); 12008bb0daffSRob Clark 12015117bd89SDaniel Vetter mutex_lock(&priv->list_lock); 12028bb0daffSRob Clark list_del(&omap_obj->mm_list); 12035117bd89SDaniel Vetter mutex_unlock(&priv->list_lock); 12048bb0daffSRob Clark 12053cbd0c58SLaurent Pinchart /* 12063cbd0c58SLaurent Pinchart * We own the sole reference to the object at this point, but to keep 12073cbd0c58SLaurent Pinchart * lockdep happy, we must still take omap_obj->lock to call 12083cbd0c58SLaurent Pinchart * omap_gem_detach_pages(). This should hardly make any difference as 12093cbd0c58SLaurent Pinchart * there can't be any lock contention. 12108bb0daffSRob Clark */ 12113cbd0c58SLaurent Pinchart mutex_lock(&omap_obj->lock); 12123cbd0c58SLaurent Pinchart 12133cbd0c58SLaurent Pinchart /* The object should not be pinned.
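* All omap_gem_pin() references must have been released with omap_gem_unpin() by now; the WARN_ON below catches a leaked pin.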
*/ 1214*1948d28dSIvaylo Dimitrov WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0); 12158bb0daffSRob Clark 1216b22e6690SLaurent Pinchart if (omap_obj->pages) { 1217b22e6690SLaurent Pinchart if (omap_obj->flags & OMAP_BO_MEM_DMABUF) 1218b22e6690SLaurent Pinchart kfree(omap_obj->pages); 1219b22e6690SLaurent Pinchart else 12208bb0daffSRob Clark omap_gem_detach_pages(obj); 1221b22e6690SLaurent Pinchart } 12228bb0daffSRob Clark 1223cdb0381dSLaurent Pinchart if (omap_obj->flags & OMAP_BO_MEM_DMA_API) { 1224f6e45661SLuis R. Rodriguez dma_free_wc(dev->dev, obj->size, omap_obj->vaddr, 122516869083SLaurent Pinchart omap_obj->dma_addr); 12268bb0daffSRob Clark } else if (omap_obj->vaddr) { 12278bb0daffSRob Clark vunmap(omap_obj->vaddr); 1228b22e6690SLaurent Pinchart } else if (obj->import_attach) { 1229b22e6690SLaurent Pinchart drm_prime_gem_destroy(obj, omap_obj->sgt); 12308bb0daffSRob Clark } 12318bb0daffSRob Clark 12323cbd0c58SLaurent Pinchart mutex_unlock(&omap_obj->lock); 12333cbd0c58SLaurent Pinchart 12348bb0daffSRob Clark drm_gem_object_release(obj); 12358bb0daffSRob Clark 12363cbd0c58SLaurent Pinchart mutex_destroy(&omap_obj->lock); 12373cbd0c58SLaurent Pinchart 123800e9c7c7SLaurent Pinchart kfree(omap_obj); 12398bb0daffSRob Clark } 12408bb0daffSRob Clark 12414ecc5fbcSTomi Valkeinen static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags) 12424ecc5fbcSTomi Valkeinen { 12434ecc5fbcSTomi Valkeinen struct omap_drm_private *priv = dev->dev_private; 12444ecc5fbcSTomi Valkeinen 12454ecc5fbcSTomi Valkeinen switch (flags & OMAP_BO_CACHE_MASK) { 12464ecc5fbcSTomi Valkeinen case OMAP_BO_CACHED: 12474ecc5fbcSTomi Valkeinen case OMAP_BO_WC: 12484ecc5fbcSTomi Valkeinen case OMAP_BO_CACHE_MASK: 12494ecc5fbcSTomi Valkeinen break; 12504ecc5fbcSTomi Valkeinen 12514ecc5fbcSTomi Valkeinen default: 12524ecc5fbcSTomi Valkeinen return false; 12534ecc5fbcSTomi Valkeinen } 12544ecc5fbcSTomi Valkeinen 12554ecc5fbcSTomi Valkeinen if (flags & OMAP_BO_TILED_MASK) { 12564ecc5fbcSTomi Valkeinen if (!priv->usergart) 12574ecc5fbcSTomi Valkeinen return false; 12584ecc5fbcSTomi Valkeinen 12594ecc5fbcSTomi Valkeinen switch (flags & OMAP_BO_TILED_MASK) { 12604ecc5fbcSTomi Valkeinen case OMAP_BO_TILED_8: 12614ecc5fbcSTomi Valkeinen case OMAP_BO_TILED_16: 12624ecc5fbcSTomi Valkeinen case OMAP_BO_TILED_32: 12634ecc5fbcSTomi Valkeinen break; 12644ecc5fbcSTomi Valkeinen 12654ecc5fbcSTomi Valkeinen default: 12664ecc5fbcSTomi Valkeinen return false; 12674ecc5fbcSTomi Valkeinen } 12684ecc5fbcSTomi Valkeinen } 12694ecc5fbcSTomi Valkeinen 12704ecc5fbcSTomi Valkeinen return true; 12714ecc5fbcSTomi Valkeinen } 12724ecc5fbcSTomi Valkeinen 1273c5ca5e02SThomas Zimmermann static const struct vm_operations_struct omap_gem_vm_ops = { 1274c5ca5e02SThomas Zimmermann .fault = omap_gem_fault, 1275c5ca5e02SThomas Zimmermann .open = drm_gem_vm_open, 1276c5ca5e02SThomas Zimmermann .close = drm_gem_vm_close, 1277c5ca5e02SThomas Zimmermann }; 1278c5ca5e02SThomas Zimmermann 1279c5ca5e02SThomas Zimmermann static const struct drm_gem_object_funcs omap_gem_object_funcs = { 1280c5ca5e02SThomas Zimmermann .free = omap_gem_free_object, 1281c5ca5e02SThomas Zimmermann .export = omap_gem_prime_export, 1282c5ca5e02SThomas Zimmermann .vm_ops = &omap_gem_vm_ops, 1283c5ca5e02SThomas Zimmermann }; 1284c5ca5e02SThomas Zimmermann 1285a96bf3cbSSean Paul /* GEM buffer object constructor */ 12868bb0daffSRob Clark struct drm_gem_object *omap_gem_new(struct drm_device *dev, 1287dfe9cfccSLaurent Pinchart union omap_gem_size gsize, u32 flags) 12888bb0daffSRob Clark { 
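/* Overall flow: validate the requested flags, derive the backing memory type (shmem, contiguous DMA memory, or dmabuf), compute the size (slot-aligned for tiled formats), allocate backing storage if needed, and register the object on the global list. */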
12898bb0daffSRob Clark struct omap_drm_private *priv = dev->dev_private; 12908bb0daffSRob Clark struct omap_gem_object *omap_obj; 129192b4b445SLaurent Pinchart struct drm_gem_object *obj; 1292ab5a60c3SDavid Herrmann struct address_space *mapping; 12938bb0daffSRob Clark size_t size; 12948bb0daffSRob Clark int ret; 12958bb0daffSRob Clark 12964ecc5fbcSTomi Valkeinen if (!omap_gem_validate_flags(dev, flags)) 12974ecc5fbcSTomi Valkeinen return NULL; 12984ecc5fbcSTomi Valkeinen 12999cba3b99SLaurent Pinchart /* Compute the memory type and cache flags. */ 130048b34ac0STomi Valkeinen if (flags & OMAP_BO_TILED_MASK) { 13019cba3b99SLaurent Pinchart /* 13029cba3b99SLaurent Pinchart * Tiled buffers are always backed by shmem pages. When they are 13039cba3b99SLaurent Pinchart * scanned out, they are remapped into DMM/TILER. 13048bb0daffSRob Clark */ 13059cba3b99SLaurent Pinchart flags |= OMAP_BO_MEM_SHMEM; 13068bb0daffSRob Clark 13079cba3b99SLaurent Pinchart /* 13089cba3b99SLaurent Pinchart * Cached buffers are currently not allowed; CPU cache maintenance 13099cba3b99SLaurent Pinchart * for tiled buffers needs to be handled better first. 13108bb0daffSRob Clark */ 13117cb0d6c1STomi Valkeinen flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED); 13127cb0d6c1STomi Valkeinen flags |= tiler_get_cpu_cache_flags(); 1313a96bf3cbSSean Paul } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { 13149cba3b99SLaurent Pinchart /* 131518d7f5abSTomi Valkeinen * If we don't have DMM, we must allocate scanout buffers 131618d7f5abSTomi Valkeinen * from contiguous DMA memory. 13179cba3b99SLaurent Pinchart */ 13189cba3b99SLaurent Pinchart flags |= OMAP_BO_MEM_DMA_API; 13193f50effdSTomi Valkeinen } else if (!(flags & OMAP_BO_MEM_DMABUF)) { 13209cba3b99SLaurent Pinchart /* 13213f50effdSTomi Valkeinen * All other buffers not backed by dma_buf are shmem-backed. 13229cba3b99SLaurent Pinchart */ 13239cba3b99SLaurent Pinchart flags |= OMAP_BO_MEM_SHMEM; 13248bb0daffSRob Clark } 13258bb0daffSRob Clark 13269cba3b99SLaurent Pinchart /* Allocate and initialize the OMAP GEM object. */ 13278bb0daffSRob Clark omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); 1328fffddfd6SLinus Torvalds if (!omap_obj) 1329a903e3b6STomi Valkeinen return NULL; 13308bb0daffSRob Clark 13318bb0daffSRob Clark obj = &omap_obj->base; 13329cba3b99SLaurent Pinchart omap_obj->flags = flags; 13333cbd0c58SLaurent Pinchart mutex_init(&omap_obj->lock); 13348bb0daffSRob Clark 133548b34ac0STomi Valkeinen if (flags & OMAP_BO_TILED_MASK) { 13369cba3b99SLaurent Pinchart /* 13379cba3b99SLaurent Pinchart * For tiled buffers align dimensions to slot boundaries and 13389cba3b99SLaurent Pinchart * calculate size based on aligned dimensions. 13398bb0daffSRob Clark */ 13409cba3b99SLaurent Pinchart tiler_align(gem2fmt(flags), &gsize.tiled.width, 13419cba3b99SLaurent Pinchart &gsize.tiled.height); 13428bb0daffSRob Clark 13439cba3b99SLaurent Pinchart size = tiler_size(gem2fmt(flags), gsize.tiled.width, 13449cba3b99SLaurent Pinchart gsize.tiled.height); 13458bb0daffSRob Clark 13469cba3b99SLaurent Pinchart omap_obj->width = gsize.tiled.width; 13479cba3b99SLaurent Pinchart omap_obj->height = gsize.tiled.height; 13489cba3b99SLaurent Pinchart } else { 13499cba3b99SLaurent Pinchart size = PAGE_ALIGN(gsize.bytes); 13508bb0daffSRob Clark } 13518bb0daffSRob Clark 1352c5ca5e02SThomas Zimmermann obj->funcs = &omap_gem_object_funcs; 1353c5ca5e02SThomas Zimmermann 1354c2eb77ffSLaurent Pinchart /* Initialize the GEM object.
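* Buffers without shmem backing (contiguous DMA memory or imported dmabuf) are initialized as private objects without a shmem file, while shmem-backed buffers have their page allocation mask set to GFP_USER | __GFP_DMA32 so the backing pages remain DMA-addressable.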
*/ 1355c2eb77ffSLaurent Pinchart if (!(flags & OMAP_BO_MEM_SHMEM)) { 1356c2eb77ffSLaurent Pinchart drm_gem_private_object_init(dev, obj, size); 1357c2eb77ffSLaurent Pinchart } else { 1358c2eb77ffSLaurent Pinchart ret = drm_gem_object_init(dev, obj, size); 1359c2eb77ffSLaurent Pinchart if (ret) 1360c2eb77ffSLaurent Pinchart goto err_free; 1361c2eb77ffSLaurent Pinchart 136293c76a3dSAl Viro mapping = obj->filp->f_mapping; 1363c2eb77ffSLaurent Pinchart mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); 1364c2eb77ffSLaurent Pinchart } 1365a903e3b6STomi Valkeinen 13669cba3b99SLaurent Pinchart /* Allocate memory if needed. */ 13679cba3b99SLaurent Pinchart if (flags & OMAP_BO_MEM_DMA_API) { 1368266c73b7SLinus Torvalds omap_obj->vaddr = dma_alloc_wc(dev->dev, size, 136916869083SLaurent Pinchart &omap_obj->dma_addr, 13709cba3b99SLaurent Pinchart GFP_KERNEL); 13719cba3b99SLaurent Pinchart if (!omap_obj->vaddr) 1372c2eb77ffSLaurent Pinchart goto err_release; 13738bb0daffSRob Clark } 13748bb0daffSRob Clark 13755117bd89SDaniel Vetter mutex_lock(&priv->list_lock); 13768bb0daffSRob Clark list_add(&omap_obj->mm_list, &priv->obj_list); 13775117bd89SDaniel Vetter mutex_unlock(&priv->list_lock); 13788bb0daffSRob Clark 13798bb0daffSRob Clark return obj; 13808bb0daffSRob Clark 1381c2eb77ffSLaurent Pinchart err_release: 1382c2eb77ffSLaurent Pinchart drm_gem_object_release(obj); 1383c2eb77ffSLaurent Pinchart err_free: 1384c2eb77ffSLaurent Pinchart kfree(omap_obj); 13858bb0daffSRob Clark return NULL; 13868bb0daffSRob Clark } 13878bb0daffSRob Clark 1388b22e6690SLaurent Pinchart struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, 1389b22e6690SLaurent Pinchart struct sg_table *sgt) 1390b22e6690SLaurent Pinchart { 1391b22e6690SLaurent Pinchart struct omap_drm_private *priv = dev->dev_private; 1392b22e6690SLaurent Pinchart struct omap_gem_object *omap_obj; 1393b22e6690SLaurent Pinchart struct drm_gem_object *obj; 1394b22e6690SLaurent Pinchart union omap_gem_size gsize; 1395b22e6690SLaurent Pinchart 1396b22e6690SLaurent Pinchart /* Without a DMM, only physically contiguous buffers can be supported. */ 1397b22e6690SLaurent Pinchart if (sgt->orig_nents != 1 && !priv->has_dmm) 1398b22e6690SLaurent Pinchart return ERR_PTR(-EINVAL); 1399b22e6690SLaurent Pinchart 1400b22e6690SLaurent Pinchart gsize.bytes = PAGE_ALIGN(size); 1401b22e6690SLaurent Pinchart obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC); 14023cbd0c58SLaurent Pinchart if (!obj) 14033cbd0c58SLaurent Pinchart return ERR_PTR(-ENOMEM); 1404b22e6690SLaurent Pinchart 1405b22e6690SLaurent Pinchart omap_obj = to_omap_bo(obj); 14063cbd0c58SLaurent Pinchart 14073cbd0c58SLaurent Pinchart mutex_lock(&omap_obj->lock); 14083cbd0c58SLaurent Pinchart 1409b22e6690SLaurent Pinchart omap_obj->sgt = sgt; 1410b22e6690SLaurent Pinchart 1411b22e6690SLaurent Pinchart if (sgt->orig_nents == 1) { 141216869083SLaurent Pinchart omap_obj->dma_addr = sg_dma_address(sgt->sgl); 1413b22e6690SLaurent Pinchart } else { 1414b22e6690SLaurent Pinchart /* Create pages list from sgt */ 1415b22e6690SLaurent Pinchart struct page **pages; 1416b22e6690SLaurent Pinchart unsigned int npages; 141753760655SMarek Szyprowski int ret; 1418b22e6690SLaurent Pinchart 1419b22e6690SLaurent Pinchart npages = DIV_ROUND_UP(size, PAGE_SIZE); 1420b22e6690SLaurent Pinchart pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); 1421b22e6690SLaurent Pinchart if (!pages) { 1422b22e6690SLaurent Pinchart mutex_unlock(&omap_obj->lock); 1423b22e6690SLaurent Pinchart omap_gem_free_object(obj); 1424b22e6690SLaurent Pinchart return ERR_PTR(-ENOMEM); 1425b22e6690SLaurent Pinchart } 1426b22e6690SLaurent Pinchart 1427b22e6690SLaurent Pinchart omap_obj->pages = pages; 1428c67e6279SChristian König ret = drm_prime_sg_to_page_array(sgt, pages, npages); 142953760655SMarek Szyprowski if (ret) { 1430b22e6690SLaurent Pinchart mutex_unlock(&omap_obj->lock); 1431b22e6690SLaurent Pinchart omap_gem_free_object(obj); 1432b22e6690SLaurent Pinchart return ERR_PTR(-ENOMEM); 1433b22e6690SLaurent Pinchart } 1434b22e6690SLaurent Pinchart } 1435b22e6690SLaurent Pinchart 1436b22e6690SLaurent Pinchart 14373cbd0c58SLaurent Pinchart mutex_unlock(&omap_obj->lock); 1438b22e6690SLaurent Pinchart return obj; 1439b22e6690SLaurent Pinchart } 1440b22e6690SLaurent Pinchart 14417ef93b0aSLaurent Pinchart /* Convenience method to construct a GEM buffer object and a userspace handle. */ 14427ef93b0aSLaurent Pinchart int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, 1443dfe9cfccSLaurent Pinchart union omap_gem_size gsize, u32 flags, u32 *handle) 14447ef93b0aSLaurent Pinchart { 14457ef93b0aSLaurent Pinchart struct drm_gem_object *obj; 14467ef93b0aSLaurent Pinchart int ret; 14477ef93b0aSLaurent Pinchart 14487ef93b0aSLaurent Pinchart obj = omap_gem_new(dev, gsize, flags); 14497ef93b0aSLaurent Pinchart if (!obj) 14507ef93b0aSLaurent Pinchart return -ENOMEM; 14517ef93b0aSLaurent Pinchart 14527ef93b0aSLaurent Pinchart ret = drm_gem_handle_create(file, obj, handle); 14537ef93b0aSLaurent Pinchart if (ret) { 145474128a23SLaurent Pinchart omap_gem_free_object(obj); 14557ef93b0aSLaurent Pinchart return ret; 14567ef93b0aSLaurent Pinchart } 14577ef93b0aSLaurent Pinchart 14587ef93b0aSLaurent Pinchart /* drop reference from allocate - handle holds it now */ 1459d742cdd6SEmil Velikov drm_gem_object_put(obj); 14607ef93b0aSLaurent Pinchart 14617ef93b0aSLaurent Pinchart return 0; 14627ef93b0aSLaurent Pinchart } 14637ef93b0aSLaurent Pinchart 14647ef93b0aSLaurent Pinchart /* ----------------------------------------------------------------------------- 14657ef93b0aSLaurent Pinchart * Init & Cleanup 14667ef93b0aSLaurent Pinchart */ 14677ef93b0aSLaurent Pinchart
14687ef93b0aSLaurent Pinchart /* If DMM is available, set up the usergart regions used for userspace mappings of tiled buffers. */ 14698bb0daffSRob Clark void omap_gem_init(struct drm_device *dev) 14708bb0daffSRob Clark { 14718bb0daffSRob Clark struct omap_drm_private *priv = dev->dev_private; 1472f4302747SLaurent Pinchart struct omap_drm_usergart *usergart; 14738bb0daffSRob Clark const enum tiler_fmt fmts[] = { 14748bb0daffSRob Clark TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT 14758bb0daffSRob Clark }; 14768bb0daffSRob Clark int i, j; 14778bb0daffSRob Clark 14788bb0daffSRob Clark if (!dmm_is_available()) { 14798bb0daffSRob Clark /* DMM only supported on OMAP4 and later, so this isn't fatal */ 14808bb0daffSRob Clark dev_warn(dev->dev, "DMM not available, disabling DMM support\n"); 14818bb0daffSRob Clark return; 14828bb0daffSRob Clark } 14838bb0daffSRob Clark 1484fffddfd6SLinus Torvalds usergart = kcalloc(ARRAY_SIZE(fmts), sizeof(*usergart), GFP_KERNEL); 1485fffddfd6SLinus Torvalds if (!usergart) 14868bb0daffSRob Clark return; 14878bb0daffSRob Clark 14888bb0daffSRob Clark /* reserve 4k aligned/wide regions for userspace mappings: */ 14898bb0daffSRob Clark for (i = 0; i < ARRAY_SIZE(fmts); i++) { 1490dfe9cfccSLaurent Pinchart u16 h = 1, w = PAGE_SIZE >> i; 1491dfe9cfccSLaurent Pinchart 14928bb0daffSRob Clark tiler_align(fmts[i], &w, &h); 14938bb0daffSRob Clark /* note: since each region is one 4KB page wide and uses the 14948bb0daffSRob Clark * minimum number of rows, the height ends up being the same 14958bb0daffSRob Clark * as the number of pages in the region 14968bb0daffSRob Clark */ 14978bb0daffSRob Clark usergart[i].height = h; 14988bb0daffSRob Clark usergart[i].height_shift = ilog2(h); 14998bb0daffSRob Clark usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT; 15008bb0daffSRob Clark usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i); 15018bb0daffSRob Clark for (j = 0; j < NUM_USERGART_ENTRIES; j++) { 1502f4302747SLaurent Pinchart struct omap_drm_usergart_entry *entry; 1503f4302747SLaurent Pinchart struct tiler_block *block; 1504f4302747SLaurent Pinchart 1505f4302747SLaurent Pinchart entry = &usergart[i].entry[j]; 1506f4302747SLaurent Pinchart block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE); 15078bb0daffSRob Clark if (IS_ERR(block)) { 15088bb0daffSRob Clark dev_err(dev->dev, 15098bb0daffSRob Clark "reserve failed: %d, %d, %ld\n", 15108bb0daffSRob Clark i, j, PTR_ERR(block)); 15118bb0daffSRob Clark return; 15128bb0daffSRob Clark } 151316869083SLaurent Pinchart entry->dma_addr = tiler_ssptr(block); 15148bb0daffSRob Clark entry->block = block; 15158bb0daffSRob Clark 151616869083SLaurent Pinchart DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h, 151716869083SLaurent Pinchart &entry->dma_addr, 15188bb0daffSRob Clark usergart[i].stride_pfn << PAGE_SHIFT); 15198bb0daffSRob Clark } 15208bb0daffSRob Clark } 15218bb0daffSRob Clark 1522f4302747SLaurent Pinchart priv->usergart = usergart; 15238bb0daffSRob Clark priv->has_dmm = true; 15248bb0daffSRob Clark } 15258bb0daffSRob Clark 15268bb0daffSRob Clark void omap_gem_deinit(struct drm_device *dev) 15278bb0daffSRob Clark { 1528f4302747SLaurent Pinchart struct omap_drm_private *priv = dev->dev_private; 1529f4302747SLaurent Pinchart 15308bb0daffSRob Clark /* I believe we can rely on there being no more outstanding GEM 15318bb0daffSRob Clark * objects which could depend on usergart/dmm at this point. 15328bb0daffSRob Clark */ 1533f4302747SLaurent Pinchart kfree(priv->usergart); 15348bb0daffSRob Clark } 1535
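/*
 * Rough usage sketch for reference (error handling elided). This assumes
 * the omap_gem_pin()/omap_gem_unpin() helpers defined earlier in this
 * file, with omap_gem_pin() taking a dma_addr_t pointer that receives
 * the pinned buffer's DMA address:
 *
 *	union omap_gem_size gsize = { .bytes = SZ_1M };
 *	struct drm_gem_object *obj;
 *	dma_addr_t dma_addr;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *	if (obj && !omap_gem_pin(obj, &dma_addr)) {
 *		... point the scanout engine at dma_addr ...
 *		omap_gem_unpin(obj);
 *	}
 *	if (obj)
 *		drm_gem_object_put(obj);
 */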