/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
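/*
 * Note (illustrative, not part of the original file): callers never
 * allocate backing pages up front; get_pages() runs lazily the first time
 * a buffer is mapped into an address space, vmap'd, or CPU-faulted, so an
 * untouched BO costs no memory beyond its bookkeeping.  A typical internal
 * caller looks like:
 *
 *	struct page **pages = get_pages(obj);	// with struct_mutex held
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);		// e.g. -ENOMEM under pressure
 */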
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			kvfree(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
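/*
 * Usage sketch (illustrative; userspace details are an assumption, not
 * taken from this file): userspace mmaps a BO through the DRM fd using the
 * fake offset associated with its handle, e.g.:
 *
 *	struct drm_msm_gem_info req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, req.offset);
 *
 * which lands in msm_gem_mmap() below, and then msm_gem_mmap_obj() fixes
 * up the mapping attributes for the BO's caching mode.
 */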
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
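/*
 * Fault handler: backing pages are only instantiated on first CPU access.
 * Each fault maps exactly one page at the faulting address (PAGE_SIZE
 * granularity, no prefaulting of neighbours), and the error from
 * get_pages()/vm_insert_mixed() is translated into a VM_FAULT_* code at
 * the bottom.
 */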
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id = aspace ? aspace->id : 0;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
					msm_obj->sgt, obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}
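/*
 * Illustrative caller pattern (names assumed, not from this file): a
 * submit or scanout path typically pins a BO into a GPU/display address
 * space like so:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... program iova into the hardware ...
 *	msm_gem_put_iova(obj, aspace);	// currently a no-op, see below
 */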
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id = aspace ? aspace->id : 0;
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		might_lock(&obj->dev->struct_mutex);
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id = aspace ? aspace->id : 0;
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
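/*
 * Descriptive note: the two helpers below back the generic
 * DRM_IOCTL_MODE_CREATE_DUMB path, so a KMS-only client (e.g. a simple
 * fbdev-style console) can allocate and mmap scanout memory without
 * knowing any msm-specific ioctls.
 */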
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
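/*
 * Illustrative kernel-internal usage of the vmap helpers above (sketch,
 * not from this file): get/put calls must balance, but the mapping itself
 * persists until msm_gem_vunmap() runs (e.g. from purge or final free):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, cmds, size);	// CPU access via the WC mapping
 *	msm_gem_put_vaddr(obj);		// only drops vmap_count
 */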
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
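/*
 * Descriptive note: the purge path above is driven by the driver's
 * shrinker.  Userspace marks idle buffers MSM_MADV_DONTNEED (via the
 * MSM_GEM_MADVISE ioctl); under memory pressure the shrinker walks such
 * purgeable objects and calls msm_gem_purge(), after which any further
 * use of the BO other than freeing it is a userspace bug.
 */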
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
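/*
 * Descriptive note: msm_gem_cpu_prep() below implements the
 * MSM_GEM_CPU_PREP ioctl semantics: wait (with timeout) on any fences
 * that conflict with the requested access.  MSM_PREP_NOSYNC turns the
 * wait into a poll, in which case a still-busy BO yields -EBUSY rather
 * than -ETIMEDOUT.
 */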
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
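/*
 * The debugfs helpers below dump one line per BO (flags, 'A'ctive or
 * 'I'nactive state, name, refcount, mmap offset, kernel vaddr, per-aspace
 * iovas, size, and madvise state), followed by any unsignaled shared and
 * exclusive fences attached to the reservation object.
 */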
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct dma_fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;
	unsigned id;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	bool use_vram = false;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = &msm_obj->domain[0].node;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}
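/*
 * Descriptive note: msm_gem_new() below picks the backing store based on
 * use_pages(): shmem-backed objects go through drm_gem_object_init()
 * (which attaches obj->filp), while VRAM-carveout objects use
 * drm_gem_private_object_init() since they have no shmem file behind them.
 */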
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}