1f6ffbd4fSLucas Stach // SPDX-License-Identifier: GPL-2.0 2a8c21a54SThe etnaviv authors /* 3f6ffbd4fSLucas Stach * Copyright (C) 2015-2018 Etnaviv Project 4a8c21a54SThe etnaviv authors */ 5a8c21a54SThe etnaviv authors 6a8c21a54SThe etnaviv authors #include <linux/spinlock.h> 7a8c21a54SThe etnaviv authors #include <linux/shmem_fs.h> 86e84f315SIngo Molnar #include <linux/sched/mm.h> 90881e7bdSIngo Molnar #include <linux/sched/task.h> 10a8c21a54SThe etnaviv authors 11a8c21a54SThe etnaviv authors #include "etnaviv_drv.h" 12a8c21a54SThe etnaviv authors #include "etnaviv_gem.h" 13a8c21a54SThe etnaviv authors #include "etnaviv_gpu.h" 14a8c21a54SThe etnaviv authors #include "etnaviv_mmu.h" 15a8c21a54SThe etnaviv authors 16d6a8743dSLucas Stach static struct lock_class_key etnaviv_shm_lock_class; 17d6a8743dSLucas Stach static struct lock_class_key etnaviv_userptr_lock_class; 18d6a8743dSLucas Stach 19a8c21a54SThe etnaviv authors static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj) 20a8c21a54SThe etnaviv authors { 21a8c21a54SThe etnaviv authors struct drm_device *dev = etnaviv_obj->base.dev; 22a8c21a54SThe etnaviv authors struct sg_table *sgt = etnaviv_obj->sgt; 23a8c21a54SThe etnaviv authors 24a8c21a54SThe etnaviv authors /* 25a8c21a54SThe etnaviv authors * For non-cached buffers, ensure the new pages are clean 26a8c21a54SThe etnaviv authors * because display controller, GPU, etc. are not coherent. 
27a8c21a54SThe etnaviv authors */ 28a8c21a54SThe etnaviv authors if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) 29a8c21a54SThe etnaviv authors dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); 30a8c21a54SThe etnaviv authors } 31a8c21a54SThe etnaviv authors 32a8c21a54SThe etnaviv authors static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj) 33a8c21a54SThe etnaviv authors { 34a8c21a54SThe etnaviv authors struct drm_device *dev = etnaviv_obj->base.dev; 35a8c21a54SThe etnaviv authors struct sg_table *sgt = etnaviv_obj->sgt; 36a8c21a54SThe etnaviv authors 37a8c21a54SThe etnaviv authors /* 38a8c21a54SThe etnaviv authors * For non-cached buffers, ensure the new pages are clean 39a8c21a54SThe etnaviv authors * because display controller, GPU, etc. are not coherent: 40a8c21a54SThe etnaviv authors * 41a8c21a54SThe etnaviv authors * WARNING: The DMA API does not support concurrent CPU 42a8c21a54SThe etnaviv authors * and device access to the memory area. With BIDIRECTIONAL, 43a8c21a54SThe etnaviv authors * we will clean the cache lines which overlap the region, 44a8c21a54SThe etnaviv authors * and invalidate all cache lines (partially) contained in 45a8c21a54SThe etnaviv authors * the region. 46a8c21a54SThe etnaviv authors * 47a8c21a54SThe etnaviv authors * If you have dirty data in the overlapping cache lines, 48a8c21a54SThe etnaviv authors * that will corrupt the GPU-written data. If you have 49a8c21a54SThe etnaviv authors * written into the remainder of the region, this can 50a8c21a54SThe etnaviv authors * discard those writes. 
51a8c21a54SThe etnaviv authors */ 52a8c21a54SThe etnaviv authors if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) 53a8c21a54SThe etnaviv authors dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); 54a8c21a54SThe etnaviv authors } 55a8c21a54SThe etnaviv authors 56a8c21a54SThe etnaviv authors /* called with etnaviv_obj->lock held */ 57a8c21a54SThe etnaviv authors static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj) 58a8c21a54SThe etnaviv authors { 59a8c21a54SThe etnaviv authors struct drm_device *dev = etnaviv_obj->base.dev; 60a8c21a54SThe etnaviv authors struct page **p = drm_gem_get_pages(&etnaviv_obj->base); 61a8c21a54SThe etnaviv authors 62a8c21a54SThe etnaviv authors if (IS_ERR(p)) { 63f91ac470SLucas Stach dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); 64a8c21a54SThe etnaviv authors return PTR_ERR(p); 65a8c21a54SThe etnaviv authors } 66a8c21a54SThe etnaviv authors 67a8c21a54SThe etnaviv authors etnaviv_obj->pages = p; 68a8c21a54SThe etnaviv authors 69a8c21a54SThe etnaviv authors return 0; 70a8c21a54SThe etnaviv authors } 71a8c21a54SThe etnaviv authors 72a8c21a54SThe etnaviv authors static void put_pages(struct etnaviv_gem_object *etnaviv_obj) 73a8c21a54SThe etnaviv authors { 74a8c21a54SThe etnaviv authors if (etnaviv_obj->sgt) { 75a8c21a54SThe etnaviv authors etnaviv_gem_scatterlist_unmap(etnaviv_obj); 76a8c21a54SThe etnaviv authors sg_free_table(etnaviv_obj->sgt); 77a8c21a54SThe etnaviv authors kfree(etnaviv_obj->sgt); 78a8c21a54SThe etnaviv authors etnaviv_obj->sgt = NULL; 79a8c21a54SThe etnaviv authors } 80a8c21a54SThe etnaviv authors if (etnaviv_obj->pages) { 81a8c21a54SThe etnaviv authors drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages, 82a8c21a54SThe etnaviv authors true, false); 83a8c21a54SThe etnaviv authors 84a8c21a54SThe etnaviv authors etnaviv_obj->pages = NULL; 85a8c21a54SThe etnaviv authors } 86a8c21a54SThe etnaviv authors } 87a8c21a54SThe etnaviv authors 88a8c21a54SThe etnaviv authors struct 
page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) 89a8c21a54SThe etnaviv authors { 90a8c21a54SThe etnaviv authors int ret; 91a8c21a54SThe etnaviv authors 92a8c21a54SThe etnaviv authors lockdep_assert_held(&etnaviv_obj->lock); 93a8c21a54SThe etnaviv authors 94a8c21a54SThe etnaviv authors if (!etnaviv_obj->pages) { 95a8c21a54SThe etnaviv authors ret = etnaviv_obj->ops->get_pages(etnaviv_obj); 96a8c21a54SThe etnaviv authors if (ret < 0) 97a8c21a54SThe etnaviv authors return ERR_PTR(ret); 98a8c21a54SThe etnaviv authors } 99a8c21a54SThe etnaviv authors 100a8c21a54SThe etnaviv authors if (!etnaviv_obj->sgt) { 101a8c21a54SThe etnaviv authors struct drm_device *dev = etnaviv_obj->base.dev; 102a8c21a54SThe etnaviv authors int npages = etnaviv_obj->base.size >> PAGE_SHIFT; 103a8c21a54SThe etnaviv authors struct sg_table *sgt; 104a8c21a54SThe etnaviv authors 105a8c21a54SThe etnaviv authors sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages); 106a8c21a54SThe etnaviv authors if (IS_ERR(sgt)) { 107a8c21a54SThe etnaviv authors dev_err(dev->dev, "failed to allocate sgt: %ld\n", 108a8c21a54SThe etnaviv authors PTR_ERR(sgt)); 109a8c21a54SThe etnaviv authors return ERR_CAST(sgt); 110a8c21a54SThe etnaviv authors } 111a8c21a54SThe etnaviv authors 112a8c21a54SThe etnaviv authors etnaviv_obj->sgt = sgt; 113a8c21a54SThe etnaviv authors 114a8c21a54SThe etnaviv authors etnaviv_gem_scatter_map(etnaviv_obj); 115a8c21a54SThe etnaviv authors } 116a8c21a54SThe etnaviv authors 117a8c21a54SThe etnaviv authors return etnaviv_obj->pages; 118a8c21a54SThe etnaviv authors } 119a8c21a54SThe etnaviv authors 120a8c21a54SThe etnaviv authors void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj) 121a8c21a54SThe etnaviv authors { 122a8c21a54SThe etnaviv authors lockdep_assert_held(&etnaviv_obj->lock); 123a8c21a54SThe etnaviv authors /* when we start tracking the pin count, then do something here */ 124a8c21a54SThe etnaviv authors } 125a8c21a54SThe etnaviv authors 
1260e7f26e6SLucas Stach static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, 127a8c21a54SThe etnaviv authors struct vm_area_struct *vma) 128a8c21a54SThe etnaviv authors { 129a8c21a54SThe etnaviv authors pgprot_t vm_page_prot; 130a8c21a54SThe etnaviv authors 131a8c21a54SThe etnaviv authors vma->vm_flags &= ~VM_PFNMAP; 132a8c21a54SThe etnaviv authors vma->vm_flags |= VM_MIXEDMAP; 133a8c21a54SThe etnaviv authors 134a8c21a54SThe etnaviv authors vm_page_prot = vm_get_page_prot(vma->vm_flags); 135a8c21a54SThe etnaviv authors 136a8c21a54SThe etnaviv authors if (etnaviv_obj->flags & ETNA_BO_WC) { 137a8c21a54SThe etnaviv authors vma->vm_page_prot = pgprot_writecombine(vm_page_prot); 138a8c21a54SThe etnaviv authors } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) { 139a8c21a54SThe etnaviv authors vma->vm_page_prot = pgprot_noncached(vm_page_prot); 140a8c21a54SThe etnaviv authors } else { 141a8c21a54SThe etnaviv authors /* 142a8c21a54SThe etnaviv authors * Shunt off cached objs to shmem file so they have their own 143a8c21a54SThe etnaviv authors * address_space (so unmap_mapping_range does what we want, 144a8c21a54SThe etnaviv authors * in particular in the case of mmap'd dmabufs) 145a8c21a54SThe etnaviv authors */ 146a8c21a54SThe etnaviv authors fput(vma->vm_file); 1470e7f26e6SLucas Stach get_file(etnaviv_obj->base.filp); 148a8c21a54SThe etnaviv authors vma->vm_pgoff = 0; 1490e7f26e6SLucas Stach vma->vm_file = etnaviv_obj->base.filp; 150a8c21a54SThe etnaviv authors 151a8c21a54SThe etnaviv authors vma->vm_page_prot = vm_page_prot; 152a8c21a54SThe etnaviv authors } 153a8c21a54SThe etnaviv authors 154a8c21a54SThe etnaviv authors return 0; 155a8c21a54SThe etnaviv authors } 156a8c21a54SThe etnaviv authors 157a8c21a54SThe etnaviv authors int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma) 158a8c21a54SThe etnaviv authors { 159a8c21a54SThe etnaviv authors struct etnaviv_gem_object *obj; 160a8c21a54SThe etnaviv authors int ret; 161a8c21a54SThe 
etnaviv authors 162a8c21a54SThe etnaviv authors ret = drm_gem_mmap(filp, vma); 163a8c21a54SThe etnaviv authors if (ret) { 164a8c21a54SThe etnaviv authors DBG("mmap failed: %d", ret); 165a8c21a54SThe etnaviv authors return ret; 166a8c21a54SThe etnaviv authors } 167a8c21a54SThe etnaviv authors 168a8c21a54SThe etnaviv authors obj = to_etnaviv_bo(vma->vm_private_data); 169a10e2bdeSLucas Stach return obj->ops->mmap(obj, vma); 170a8c21a54SThe etnaviv authors } 171a8c21a54SThe etnaviv authors 172cfad05a2SSouptick Joarder vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf) 173a8c21a54SThe etnaviv authors { 17411bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 175a8c21a54SThe etnaviv authors struct drm_gem_object *obj = vma->vm_private_data; 176a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 177a8c21a54SThe etnaviv authors struct page **pages, *page; 178a8c21a54SThe etnaviv authors pgoff_t pgoff; 179cfad05a2SSouptick Joarder int err; 180a8c21a54SThe etnaviv authors 181a8c21a54SThe etnaviv authors /* 182a8c21a54SThe etnaviv authors * Make sure we don't parallel update on a fault, nor move or remove 183cfad05a2SSouptick Joarder * something from beneath our feet. Note that vmf_insert_page() is 184a8c21a54SThe etnaviv authors * specifically coded to take care of this, so we don't have to. 
185a8c21a54SThe etnaviv authors */ 186cfad05a2SSouptick Joarder err = mutex_lock_interruptible(&etnaviv_obj->lock); 187cfad05a2SSouptick Joarder if (err) 188cfad05a2SSouptick Joarder return VM_FAULT_NOPAGE; 189a8c21a54SThe etnaviv authors /* make sure we have pages attached now */ 190a8c21a54SThe etnaviv authors pages = etnaviv_gem_get_pages(etnaviv_obj); 191a8c21a54SThe etnaviv authors mutex_unlock(&etnaviv_obj->lock); 192a8c21a54SThe etnaviv authors 193a8c21a54SThe etnaviv authors if (IS_ERR(pages)) { 194cfad05a2SSouptick Joarder err = PTR_ERR(pages); 195cfad05a2SSouptick Joarder return vmf_error(err); 196a8c21a54SThe etnaviv authors } 197a8c21a54SThe etnaviv authors 198a8c21a54SThe etnaviv authors /* We don't use vmf->pgoff since that has the fake offset: */ 1991a29d85eSJan Kara pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 200a8c21a54SThe etnaviv authors 201a8c21a54SThe etnaviv authors page = pages[pgoff]; 202a8c21a54SThe etnaviv authors 2031a29d85eSJan Kara VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, 204a8c21a54SThe etnaviv authors page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT); 205a8c21a54SThe etnaviv authors 206cfad05a2SSouptick Joarder return vmf_insert_page(vma, vmf->address, page); 207a8c21a54SThe etnaviv authors } 208a8c21a54SThe etnaviv authors 209a8c21a54SThe etnaviv authors int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset) 210a8c21a54SThe etnaviv authors { 211a8c21a54SThe etnaviv authors int ret; 212a8c21a54SThe etnaviv authors 213a8c21a54SThe etnaviv authors /* Make it mmapable */ 214a8c21a54SThe etnaviv authors ret = drm_gem_create_mmap_offset(obj); 215a8c21a54SThe etnaviv authors if (ret) 216a8c21a54SThe etnaviv authors dev_err(obj->dev->dev, "could not allocate mmap offset\n"); 217a8c21a54SThe etnaviv authors else 218a8c21a54SThe etnaviv authors *offset = drm_vma_node_offset_addr(&obj->vma_node); 219a8c21a54SThe etnaviv authors 220a8c21a54SThe etnaviv authors return ret; 221a8c21a54SThe etnaviv 
authors } 222a8c21a54SThe etnaviv authors 223a8c21a54SThe etnaviv authors static struct etnaviv_vram_mapping * 224a8c21a54SThe etnaviv authors etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj, 225a8c21a54SThe etnaviv authors struct etnaviv_iommu *mmu) 226a8c21a54SThe etnaviv authors { 227a8c21a54SThe etnaviv authors struct etnaviv_vram_mapping *mapping; 228a8c21a54SThe etnaviv authors 229a8c21a54SThe etnaviv authors list_for_each_entry(mapping, &obj->vram_list, obj_node) { 230a8c21a54SThe etnaviv authors if (mapping->mmu == mmu) 231a8c21a54SThe etnaviv authors return mapping; 232a8c21a54SThe etnaviv authors } 233a8c21a54SThe etnaviv authors 234a8c21a54SThe etnaviv authors return NULL; 235a8c21a54SThe etnaviv authors } 236a8c21a54SThe etnaviv authors 237b6325f40SRussell King void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping) 238b6325f40SRussell King { 239b6325f40SRussell King struct etnaviv_gem_object *etnaviv_obj = mapping->object; 240b6325f40SRussell King 24123d1dd03SCihangir Akturk drm_gem_object_get(&etnaviv_obj->base); 242b6325f40SRussell King 243b6325f40SRussell King mutex_lock(&etnaviv_obj->lock); 244b6325f40SRussell King WARN_ON(mapping->use == 0); 245b6325f40SRussell King mapping->use += 1; 246b6325f40SRussell King mutex_unlock(&etnaviv_obj->lock); 247b6325f40SRussell King } 248b6325f40SRussell King 249b6325f40SRussell King void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) 250b6325f40SRussell King { 251b6325f40SRussell King struct etnaviv_gem_object *etnaviv_obj = mapping->object; 252b6325f40SRussell King 253b6325f40SRussell King mutex_lock(&etnaviv_obj->lock); 254b6325f40SRussell King WARN_ON(mapping->use == 0); 255b6325f40SRussell King mapping->use -= 1; 256b6325f40SRussell King mutex_unlock(&etnaviv_obj->lock); 257b6325f40SRussell King 25823d1dd03SCihangir Akturk drm_gem_object_put_unlocked(&etnaviv_obj->base); 259b6325f40SRussell King } 260b6325f40SRussell King 261b6325f40SRussell King struct 
etnaviv_vram_mapping *etnaviv_gem_mapping_get( 262b6325f40SRussell King struct drm_gem_object *obj, struct etnaviv_gpu *gpu) 263a8c21a54SThe etnaviv authors { 264a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 265a8c21a54SThe etnaviv authors struct etnaviv_vram_mapping *mapping; 266a8c21a54SThe etnaviv authors struct page **pages; 267a8c21a54SThe etnaviv authors int ret = 0; 268a8c21a54SThe etnaviv authors 269a8c21a54SThe etnaviv authors mutex_lock(&etnaviv_obj->lock); 270a8c21a54SThe etnaviv authors mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); 271a8c21a54SThe etnaviv authors if (mapping) { 272a8c21a54SThe etnaviv authors /* 273a8c21a54SThe etnaviv authors * Holding the object lock prevents the use count changing 274a8c21a54SThe etnaviv authors * beneath us. If the use count is zero, the MMU might be 275a8c21a54SThe etnaviv authors * reaping this object, so take the lock and re-check that 276a8c21a54SThe etnaviv authors * the MMU owns this mapping to close this race. 
277a8c21a54SThe etnaviv authors */ 278a8c21a54SThe etnaviv authors if (mapping->use == 0) { 279a8c21a54SThe etnaviv authors mutex_lock(&gpu->mmu->lock); 280a8c21a54SThe etnaviv authors if (mapping->mmu == gpu->mmu) 281a8c21a54SThe etnaviv authors mapping->use += 1; 282a8c21a54SThe etnaviv authors else 283a8c21a54SThe etnaviv authors mapping = NULL; 284a8c21a54SThe etnaviv authors mutex_unlock(&gpu->mmu->lock); 285a8c21a54SThe etnaviv authors if (mapping) 286a8c21a54SThe etnaviv authors goto out; 287a8c21a54SThe etnaviv authors } else { 288a8c21a54SThe etnaviv authors mapping->use += 1; 289a8c21a54SThe etnaviv authors goto out; 290a8c21a54SThe etnaviv authors } 291a8c21a54SThe etnaviv authors } 292a8c21a54SThe etnaviv authors 293a8c21a54SThe etnaviv authors pages = etnaviv_gem_get_pages(etnaviv_obj); 294a8c21a54SThe etnaviv authors if (IS_ERR(pages)) { 295a8c21a54SThe etnaviv authors ret = PTR_ERR(pages); 296a8c21a54SThe etnaviv authors goto out; 297a8c21a54SThe etnaviv authors } 298a8c21a54SThe etnaviv authors 299a8c21a54SThe etnaviv authors /* 300a8c21a54SThe etnaviv authors * See if we have a reaped vram mapping we can re-use before 301a8c21a54SThe etnaviv authors * allocating a fresh mapping. 
302a8c21a54SThe etnaviv authors */ 303a8c21a54SThe etnaviv authors mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL); 304a8c21a54SThe etnaviv authors if (!mapping) { 305a8c21a54SThe etnaviv authors mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 306ed94add0SDan Carpenter if (!mapping) { 307ed94add0SDan Carpenter ret = -ENOMEM; 308ed94add0SDan Carpenter goto out; 309ed94add0SDan Carpenter } 310a8c21a54SThe etnaviv authors 311a8c21a54SThe etnaviv authors INIT_LIST_HEAD(&mapping->scan_node); 312a8c21a54SThe etnaviv authors mapping->object = etnaviv_obj; 313a8c21a54SThe etnaviv authors } else { 314a8c21a54SThe etnaviv authors list_del(&mapping->obj_node); 315a8c21a54SThe etnaviv authors } 316a8c21a54SThe etnaviv authors 317a8c21a54SThe etnaviv authors mapping->mmu = gpu->mmu; 318a8c21a54SThe etnaviv authors mapping->use = 1; 319a8c21a54SThe etnaviv authors 320a8c21a54SThe etnaviv authors ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base, 321a8c21a54SThe etnaviv authors mapping); 322a8c21a54SThe etnaviv authors if (ret < 0) 323a8c21a54SThe etnaviv authors kfree(mapping); 324a8c21a54SThe etnaviv authors else 325a8c21a54SThe etnaviv authors list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list); 326a8c21a54SThe etnaviv authors 327a8c21a54SThe etnaviv authors out: 328a8c21a54SThe etnaviv authors mutex_unlock(&etnaviv_obj->lock); 329a8c21a54SThe etnaviv authors 330b6325f40SRussell King if (ret) 331b6325f40SRussell King return ERR_PTR(ret); 332b6325f40SRussell King 333a8c21a54SThe etnaviv authors /* Take a reference on the object */ 33423d1dd03SCihangir Akturk drm_gem_object_get(obj); 335b6325f40SRussell King return mapping; 336a8c21a54SThe etnaviv authors } 337a8c21a54SThe etnaviv authors 338ce3088fdSLucas Stach void *etnaviv_gem_vmap(struct drm_gem_object *obj) 339a8c21a54SThe etnaviv authors { 340a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 341a8c21a54SThe etnaviv authors 342a0a5ab3eSLucas Stach 
if (etnaviv_obj->vaddr) 343a0a5ab3eSLucas Stach return etnaviv_obj->vaddr; 344a0a5ab3eSLucas Stach 345a8c21a54SThe etnaviv authors mutex_lock(&etnaviv_obj->lock); 346a0a5ab3eSLucas Stach /* 347a0a5ab3eSLucas Stach * Need to check again, as we might have raced with another thread 348a0a5ab3eSLucas Stach * while waiting for the mutex. 349a0a5ab3eSLucas Stach */ 350a0a5ab3eSLucas Stach if (!etnaviv_obj->vaddr) 351a0a5ab3eSLucas Stach etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj); 352a8c21a54SThe etnaviv authors mutex_unlock(&etnaviv_obj->lock); 353a8c21a54SThe etnaviv authors 354a8c21a54SThe etnaviv authors return etnaviv_obj->vaddr; 355a8c21a54SThe etnaviv authors } 356a8c21a54SThe etnaviv authors 357a0a5ab3eSLucas Stach static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) 358a0a5ab3eSLucas Stach { 359a0a5ab3eSLucas Stach struct page **pages; 360a0a5ab3eSLucas Stach 361a0a5ab3eSLucas Stach lockdep_assert_held(&obj->lock); 362a0a5ab3eSLucas Stach 363a0a5ab3eSLucas Stach pages = etnaviv_gem_get_pages(obj); 364a0a5ab3eSLucas Stach if (IS_ERR(pages)) 365a0a5ab3eSLucas Stach return NULL; 366a0a5ab3eSLucas Stach 367a0a5ab3eSLucas Stach return vmap(pages, obj->base.size >> PAGE_SHIFT, 368a0a5ab3eSLucas Stach VM_MAP, pgprot_writecombine(PAGE_KERNEL)); 369a0a5ab3eSLucas Stach } 370a0a5ab3eSLucas Stach 371a8c21a54SThe etnaviv authors static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) 372a8c21a54SThe etnaviv authors { 373a8c21a54SThe etnaviv authors if (op & ETNA_PREP_READ) 374a8c21a54SThe etnaviv authors return DMA_FROM_DEVICE; 375a8c21a54SThe etnaviv authors else if (op & ETNA_PREP_WRITE) 376a8c21a54SThe etnaviv authors return DMA_TO_DEVICE; 377a8c21a54SThe etnaviv authors else 378a8c21a54SThe etnaviv authors return DMA_BIDIRECTIONAL; 379a8c21a54SThe etnaviv authors } 380a8c21a54SThe etnaviv authors 381a8c21a54SThe etnaviv authors int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, 382a8c21a54SThe etnaviv authors struct 
timespec *timeout) 383a8c21a54SThe etnaviv authors { 384a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 385a8c21a54SThe etnaviv authors struct drm_device *dev = obj->dev; 386a8c21a54SThe etnaviv authors bool write = !!(op & ETNA_PREP_WRITE); 38746a269daSLucas Stach int ret; 388a8c21a54SThe etnaviv authors 3898cc47b3eSLucas Stach if (!etnaviv_obj->sgt) { 3908cc47b3eSLucas Stach void *ret; 3918cc47b3eSLucas Stach 3928cc47b3eSLucas Stach mutex_lock(&etnaviv_obj->lock); 3938cc47b3eSLucas Stach ret = etnaviv_gem_get_pages(etnaviv_obj); 3948cc47b3eSLucas Stach mutex_unlock(&etnaviv_obj->lock); 3958cc47b3eSLucas Stach if (IS_ERR(ret)) 3968cc47b3eSLucas Stach return PTR_ERR(ret); 3978cc47b3eSLucas Stach } 3988cc47b3eSLucas Stach 39946a269daSLucas Stach if (op & ETNA_PREP_NOSYNC) { 40046a269daSLucas Stach if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, 40146a269daSLucas Stach write)) 40246a269daSLucas Stach return -EBUSY; 40346a269daSLucas Stach } else { 40446a269daSLucas Stach unsigned long remain = etnaviv_timeout_to_jiffies(timeout); 40546a269daSLucas Stach 40646a269daSLucas Stach ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, 407a8c21a54SThe etnaviv authors write, true, remain); 40846a269daSLucas Stach if (ret <= 0) 40946a269daSLucas Stach return ret == 0 ? 
-ETIMEDOUT : ret; 41046a269daSLucas Stach } 411a8c21a54SThe etnaviv authors 412a8c21a54SThe etnaviv authors if (etnaviv_obj->flags & ETNA_BO_CACHED) { 413a8c21a54SThe etnaviv authors dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, 414a8c21a54SThe etnaviv authors etnaviv_obj->sgt->nents, 415a8c21a54SThe etnaviv authors etnaviv_op_to_dma_dir(op)); 416a8c21a54SThe etnaviv authors etnaviv_obj->last_cpu_prep_op = op; 417a8c21a54SThe etnaviv authors } 418a8c21a54SThe etnaviv authors 419a8c21a54SThe etnaviv authors return 0; 420a8c21a54SThe etnaviv authors } 421a8c21a54SThe etnaviv authors 422a8c21a54SThe etnaviv authors int etnaviv_gem_cpu_fini(struct drm_gem_object *obj) 423a8c21a54SThe etnaviv authors { 424a8c21a54SThe etnaviv authors struct drm_device *dev = obj->dev; 425a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 426a8c21a54SThe etnaviv authors 427a8c21a54SThe etnaviv authors if (etnaviv_obj->flags & ETNA_BO_CACHED) { 428a8c21a54SThe etnaviv authors /* fini without a prep is almost certainly a userspace error */ 429a8c21a54SThe etnaviv authors WARN_ON(etnaviv_obj->last_cpu_prep_op == 0); 430a8c21a54SThe etnaviv authors dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl, 431a8c21a54SThe etnaviv authors etnaviv_obj->sgt->nents, 432a8c21a54SThe etnaviv authors etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op)); 433a8c21a54SThe etnaviv authors etnaviv_obj->last_cpu_prep_op = 0; 434a8c21a54SThe etnaviv authors } 435a8c21a54SThe etnaviv authors 436a8c21a54SThe etnaviv authors return 0; 437a8c21a54SThe etnaviv authors } 438a8c21a54SThe etnaviv authors 439a8c21a54SThe etnaviv authors int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, 440a8c21a54SThe etnaviv authors struct timespec *timeout) 441a8c21a54SThe etnaviv authors { 442a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 443a8c21a54SThe etnaviv authors 444a8c21a54SThe etnaviv authors return 
etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout); 445a8c21a54SThe etnaviv authors } 446a8c21a54SThe etnaviv authors 447a8c21a54SThe etnaviv authors #ifdef CONFIG_DEBUG_FS 448f54d1867SChris Wilson static void etnaviv_gem_describe_fence(struct dma_fence *fence, 449a8c21a54SThe etnaviv authors const char *type, struct seq_file *m) 450a8c21a54SThe etnaviv authors { 451f54d1867SChris Wilson if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 4523415701aSChristian König seq_printf(m, "\t%9s: %s %s seq %llu\n", 453a8c21a54SThe etnaviv authors type, 454a8c21a54SThe etnaviv authors fence->ops->get_driver_name(fence), 455a8c21a54SThe etnaviv authors fence->ops->get_timeline_name(fence), 456a8c21a54SThe etnaviv authors fence->seqno); 457a8c21a54SThe etnaviv authors } 458a8c21a54SThe etnaviv authors 459a8c21a54SThe etnaviv authors static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 460a8c21a54SThe etnaviv authors { 461a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 462a8c21a54SThe etnaviv authors struct reservation_object *robj = etnaviv_obj->resv; 463a8c21a54SThe etnaviv authors struct reservation_object_list *fobj; 464f54d1867SChris Wilson struct dma_fence *fence; 465a8c21a54SThe etnaviv authors unsigned long off = drm_vma_node_start(&obj->vma_node); 466a8c21a54SThe etnaviv authors 467a8c21a54SThe etnaviv authors seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", 468a8c21a54SThe etnaviv authors etnaviv_obj->flags, is_active(etnaviv_obj) ? 
'A' : 'I', 4692c935bc5SPeter Zijlstra obj->name, kref_read(&obj->refcount), 470a8c21a54SThe etnaviv authors off, etnaviv_obj->vaddr, obj->size); 471a8c21a54SThe etnaviv authors 472a8c21a54SThe etnaviv authors rcu_read_lock(); 473a8c21a54SThe etnaviv authors fobj = rcu_dereference(robj->fence); 474a8c21a54SThe etnaviv authors if (fobj) { 475a8c21a54SThe etnaviv authors unsigned int i, shared_count = fobj->shared_count; 476a8c21a54SThe etnaviv authors 477a8c21a54SThe etnaviv authors for (i = 0; i < shared_count; i++) { 478a8c21a54SThe etnaviv authors fence = rcu_dereference(fobj->shared[i]); 479a8c21a54SThe etnaviv authors etnaviv_gem_describe_fence(fence, "Shared", m); 480a8c21a54SThe etnaviv authors } 481a8c21a54SThe etnaviv authors } 482a8c21a54SThe etnaviv authors 483a8c21a54SThe etnaviv authors fence = rcu_dereference(robj->fence_excl); 484a8c21a54SThe etnaviv authors if (fence) 485a8c21a54SThe etnaviv authors etnaviv_gem_describe_fence(fence, "Exclusive", m); 486a8c21a54SThe etnaviv authors rcu_read_unlock(); 487a8c21a54SThe etnaviv authors } 488a8c21a54SThe etnaviv authors 489a8c21a54SThe etnaviv authors void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, 490a8c21a54SThe etnaviv authors struct seq_file *m) 491a8c21a54SThe etnaviv authors { 492a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj; 493a8c21a54SThe etnaviv authors int count = 0; 494a8c21a54SThe etnaviv authors size_t size = 0; 495a8c21a54SThe etnaviv authors 496a8c21a54SThe etnaviv authors mutex_lock(&priv->gem_lock); 497a8c21a54SThe etnaviv authors list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) { 498a8c21a54SThe etnaviv authors struct drm_gem_object *obj = &etnaviv_obj->base; 499a8c21a54SThe etnaviv authors 500a8c21a54SThe etnaviv authors seq_puts(m, " "); 501a8c21a54SThe etnaviv authors etnaviv_gem_describe(obj, m); 502a8c21a54SThe etnaviv authors count++; 503a8c21a54SThe etnaviv authors size += obj->size; 504a8c21a54SThe etnaviv authors } 
505a8c21a54SThe etnaviv authors mutex_unlock(&priv->gem_lock); 506a8c21a54SThe etnaviv authors 507a8c21a54SThe etnaviv authors seq_printf(m, "Total %d objects, %zu bytes\n", count, size); 508a8c21a54SThe etnaviv authors } 509a8c21a54SThe etnaviv authors #endif 510a8c21a54SThe etnaviv authors 511a8c21a54SThe etnaviv authors static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) 512a8c21a54SThe etnaviv authors { 513a8c21a54SThe etnaviv authors vunmap(etnaviv_obj->vaddr); 514a8c21a54SThe etnaviv authors put_pages(etnaviv_obj); 515a8c21a54SThe etnaviv authors } 516a8c21a54SThe etnaviv authors 517a8c21a54SThe etnaviv authors static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { 518a8c21a54SThe etnaviv authors .get_pages = etnaviv_gem_shmem_get_pages, 519a8c21a54SThe etnaviv authors .release = etnaviv_gem_shmem_release, 520a0a5ab3eSLucas Stach .vmap = etnaviv_gem_vmap_impl, 521a10e2bdeSLucas Stach .mmap = etnaviv_gem_mmap_obj, 522a8c21a54SThe etnaviv authors }; 523a8c21a54SThe etnaviv authors 524a8c21a54SThe etnaviv authors void etnaviv_gem_free_object(struct drm_gem_object *obj) 525a8c21a54SThe etnaviv authors { 526a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 52751841752SLucas Stach struct etnaviv_drm_private *priv = obj->dev->dev_private; 528a8c21a54SThe etnaviv authors struct etnaviv_vram_mapping *mapping, *tmp; 529a8c21a54SThe etnaviv authors 530a8c21a54SThe etnaviv authors /* object should not be active */ 531a8c21a54SThe etnaviv authors WARN_ON(is_active(etnaviv_obj)); 532a8c21a54SThe etnaviv authors 53351841752SLucas Stach mutex_lock(&priv->gem_lock); 534a8c21a54SThe etnaviv authors list_del(&etnaviv_obj->gem_node); 53551841752SLucas Stach mutex_unlock(&priv->gem_lock); 536a8c21a54SThe etnaviv authors 537a8c21a54SThe etnaviv authors list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, 538a8c21a54SThe etnaviv authors obj_node) { 539a8c21a54SThe etnaviv authors struct 
etnaviv_iommu *mmu = mapping->mmu; 540a8c21a54SThe etnaviv authors 541a8c21a54SThe etnaviv authors WARN_ON(mapping->use); 542a8c21a54SThe etnaviv authors 543a8c21a54SThe etnaviv authors if (mmu) 544a8c21a54SThe etnaviv authors etnaviv_iommu_unmap_gem(mmu, mapping); 545a8c21a54SThe etnaviv authors 546a8c21a54SThe etnaviv authors list_del(&mapping->obj_node); 547a8c21a54SThe etnaviv authors kfree(mapping); 548a8c21a54SThe etnaviv authors } 549a8c21a54SThe etnaviv authors 550a8c21a54SThe etnaviv authors drm_gem_free_mmap_offset(obj); 551a8c21a54SThe etnaviv authors etnaviv_obj->ops->release(etnaviv_obj); 552a8c21a54SThe etnaviv authors if (etnaviv_obj->resv == &etnaviv_obj->_resv) 553a8c21a54SThe etnaviv authors reservation_object_fini(&etnaviv_obj->_resv); 554a8c21a54SThe etnaviv authors drm_gem_object_release(obj); 555a8c21a54SThe etnaviv authors 556a8c21a54SThe etnaviv authors kfree(etnaviv_obj); 557a8c21a54SThe etnaviv authors } 558a8c21a54SThe etnaviv authors 55954f09288SLucas Stach void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj) 560a8c21a54SThe etnaviv authors { 561a8c21a54SThe etnaviv authors struct etnaviv_drm_private *priv = dev->dev_private; 562a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 563a8c21a54SThe etnaviv authors 564a8c21a54SThe etnaviv authors mutex_lock(&priv->gem_lock); 565a8c21a54SThe etnaviv authors list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list); 566a8c21a54SThe etnaviv authors mutex_unlock(&priv->gem_lock); 567a8c21a54SThe etnaviv authors } 568a8c21a54SThe etnaviv authors 569a8c21a54SThe etnaviv authors static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, 570a8c21a54SThe etnaviv authors struct reservation_object *robj, const struct etnaviv_gem_ops *ops, 571a8c21a54SThe etnaviv authors struct drm_gem_object **obj) 572a8c21a54SThe etnaviv authors { 573a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj; 574a8c21a54SThe etnaviv 
authors unsigned sz = sizeof(*etnaviv_obj); 575a8c21a54SThe etnaviv authors bool valid = true; 576a8c21a54SThe etnaviv authors 577a8c21a54SThe etnaviv authors /* validate flags */ 578a8c21a54SThe etnaviv authors switch (flags & ETNA_BO_CACHE_MASK) { 579a8c21a54SThe etnaviv authors case ETNA_BO_UNCACHED: 580a8c21a54SThe etnaviv authors case ETNA_BO_CACHED: 581a8c21a54SThe etnaviv authors case ETNA_BO_WC: 582a8c21a54SThe etnaviv authors break; 583a8c21a54SThe etnaviv authors default: 584a8c21a54SThe etnaviv authors valid = false; 585a8c21a54SThe etnaviv authors } 586a8c21a54SThe etnaviv authors 587a8c21a54SThe etnaviv authors if (!valid) { 588a8c21a54SThe etnaviv authors dev_err(dev->dev, "invalid cache flag: %x\n", 589a8c21a54SThe etnaviv authors (flags & ETNA_BO_CACHE_MASK)); 590a8c21a54SThe etnaviv authors return -EINVAL; 591a8c21a54SThe etnaviv authors } 592a8c21a54SThe etnaviv authors 593a8c21a54SThe etnaviv authors etnaviv_obj = kzalloc(sz, GFP_KERNEL); 594a8c21a54SThe etnaviv authors if (!etnaviv_obj) 595a8c21a54SThe etnaviv authors return -ENOMEM; 596a8c21a54SThe etnaviv authors 597a8c21a54SThe etnaviv authors etnaviv_obj->flags = flags; 598a8c21a54SThe etnaviv authors etnaviv_obj->ops = ops; 599a8c21a54SThe etnaviv authors if (robj) { 600a8c21a54SThe etnaviv authors etnaviv_obj->resv = robj; 601a8c21a54SThe etnaviv authors } else { 602a8c21a54SThe etnaviv authors etnaviv_obj->resv = &etnaviv_obj->_resv; 603a8c21a54SThe etnaviv authors reservation_object_init(&etnaviv_obj->_resv); 604a8c21a54SThe etnaviv authors } 605a8c21a54SThe etnaviv authors 606a8c21a54SThe etnaviv authors mutex_init(&etnaviv_obj->lock); 607a8c21a54SThe etnaviv authors INIT_LIST_HEAD(&etnaviv_obj->vram_list); 608a8c21a54SThe etnaviv authors 609a8c21a54SThe etnaviv authors *obj = &etnaviv_obj->base; 610a8c21a54SThe etnaviv authors 611a8c21a54SThe etnaviv authors return 0; 612a8c21a54SThe etnaviv authors } 613a8c21a54SThe etnaviv authors 614cdd32563SLucas Stach /* convenience method to 
/* (continuation of the header comment started on the previous chunk line) */
construct a GEM buffer object, and userspace handle */
/*
 * etnaviv_gem_new_handle() - create a shmem-backed BO and a userspace
 * handle for it.  Page-aligns @size, builds the object via
 * etnaviv_gem_new_impl() with etnaviv_gem_shmem_ops, restricts the backing
 * mapping's GFP mask (pages stay pinned, so MOVABLE allocation must be
 * avoided — see the in-code comment), registers the BO on the device list
 * and creates the handle.  The allocation reference is always dropped at
 * the end; on success the handle keeps the object alive.
 */
615cdd32563SLucas Stach int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
616cdd32563SLucas Stach u32 size, u32 flags, u32 *handle)
617a8c21a54SThe etnaviv authors {
618a8c21a54SThe etnaviv authors struct drm_gem_object *obj = NULL;
619a8c21a54SThe etnaviv authors int ret;
620a8c21a54SThe etnaviv authors
621a8c21a54SThe etnaviv authors size = PAGE_ALIGN(size);
622a8c21a54SThe etnaviv authors
623a8c21a54SThe etnaviv authors ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
624a8c21a54SThe etnaviv authors &etnaviv_gem_shmem_ops, &obj);
625a8c21a54SThe etnaviv authors if (ret)
626a8c21a54SThe etnaviv authors goto fail;
627a8c21a54SThe etnaviv authors
628d6a8743dSLucas Stach lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
629d6a8743dSLucas Stach
630a8c21a54SThe etnaviv authors ret = drm_gem_object_init(dev, obj, size);
631a8c21a54SThe etnaviv authors if (ret == 0) {
632a8c21a54SThe etnaviv authors struct address_space *mapping;
633a8c21a54SThe etnaviv authors
634a8c21a54SThe etnaviv authors /*
635a8c21a54SThe etnaviv authors * Our buffers are kept pinned, so allocating them
636a8c21a54SThe etnaviv authors * from the MOVABLE zone is a really bad idea, and
637cdd32563SLucas Stach * conflicts with CMA. See comments above new_inode()
638a8c21a54SThe etnaviv authors * why this is required _and_ expected if you're
639a8c21a54SThe etnaviv authors * going to pin these pages.
640a8c21a54SThe etnaviv authors */
64193c76a3dSAl Viro mapping = obj->filp->f_mapping;
6426cbf0400SLucas Stach mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
64365375b87SLucas Stach __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
644a8c21a54SThe etnaviv authors }
645a8c21a54SThe etnaviv authors
646a8c21a54SThe etnaviv authors if (ret)
647a8c21a54SThe etnaviv authors goto fail;
648a8c21a54SThe etnaviv authors
64954f09288SLucas Stach etnaviv_gem_obj_add(dev, obj);
650a8c21a54SThe etnaviv authors
651a8c21a54SThe etnaviv authors ret = drm_gem_handle_create(file, obj, handle);
652a8c21a54SThe etnaviv authors
653a8c21a54SThe etnaviv authors /* drop reference from allocate - handle holds it now */
654cdd32563SLucas Stach fail:
65523d1dd03SCihangir Akturk drm_gem_object_put_unlocked(obj);
656a8c21a54SThe etnaviv authors
657a8c21a54SThe etnaviv authors return ret;
658a8c21a54SThe etnaviv authors }
659a8c21a54SThe etnaviv authors
/*
 * etnaviv_gem_new_private() - create a BO without shmem backing
 * (drm_gem_private_object_init); used for userptr and similar objects
 * whose pages come from elsewhere.  Returns 0 with *res set, or the error
 * from etnaviv_gem_new_impl().
 */
660a8c21a54SThe etnaviv authors int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
661a8c21a54SThe etnaviv authors struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
662a8c21a54SThe etnaviv authors struct etnaviv_gem_object **res)
663a8c21a54SThe etnaviv authors {
664a8c21a54SThe etnaviv authors struct drm_gem_object *obj;
665a8c21a54SThe etnaviv authors int ret;
666a8c21a54SThe etnaviv authors
667a8c21a54SThe etnaviv authors ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
668a8c21a54SThe etnaviv authors if (ret)
669a8c21a54SThe etnaviv authors return ret;
670a8c21a54SThe etnaviv authors
671a8c21a54SThe etnaviv authors drm_gem_private_object_init(dev, obj, size);
672a8c21a54SThe etnaviv authors
673a8c21a54SThe etnaviv authors *res = to_etnaviv_bo(obj);
674a8c21a54SThe etnaviv authors
675a8c21a54SThe etnaviv authors return 0;
676a8c21a54SThe etnaviv authors }
677a8c21a54SThe etnaviv authors
/* (body of this function continues on the next chunk line) */
678a8c21a54SThe etnaviv authors static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
/*
 * Body of etnaviv_gem_userptr_get_pages() (signature is on the previous
 * chunk line): pin the userspace pages backing a userptr BO.  Refuses with
 * -EPERM when called from an mm other than the one that created the BO,
 * then pins all npages with get_user_pages_fast() in a retry loop
 * (FOLL_WRITE unless the BO is read-only), releasing any partial pin on
 * error.  On success the pinned page array is stored in
 * etnaviv_obj->pages.
 */
679a8c21a54SThe etnaviv authors {
680a8c21a54SThe etnaviv authors struct page **pvec = NULL;
681b2295c24SLucas Stach struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
682b2295c24SLucas Stach int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
683a8c21a54SThe etnaviv authors
/* NOTE(review): "¤t" below is extraction mojibake — the HTML entity
 * "&curren;" swallowed "&curr" of "&current"; the real source reads
 * might_lock_read(&current->mm->mmap_sem);  Bytes kept as found. */
684783c06cbSLucas Stach might_lock_read(¤t->mm->mmap_sem);
685783c06cbSLucas Stach
686b2295c24SLucas Stach if (userptr->mm != current->mm)
687b2295c24SLucas Stach return -EPERM;
688b2295c24SLucas Stach
689b2295c24SLucas Stach pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
690b2295c24SLucas Stach if (!pvec)
691b2295c24SLucas Stach return -ENOMEM;
692b2295c24SLucas Stach
693b2295c24SLucas Stach do {
694b2295c24SLucas Stach unsigned num_pages = npages - pinned;
695b2295c24SLucas Stach uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
696b2295c24SLucas Stach struct page **pages = pvec + pinned;
697b2295c24SLucas Stach
698b2295c24SLucas Stach ret = get_user_pages_fast(ptr, num_pages,
699b2295c24SLucas Stach !userptr->ro ? FOLL_WRITE : 0, pages);
700b2295c24SLucas Stach if (ret < 0) {
/* undo the partial pin before bailing out */
701b2295c24SLucas Stach release_pages(pvec, pinned);
702b2295c24SLucas Stach kvfree(pvec);
703a8c21a54SThe etnaviv authors return ret;
704a8c21a54SThe etnaviv authors }
705a8c21a54SThe etnaviv authors
706b2295c24SLucas Stach pinned += ret;
707a8c21a54SThe etnaviv authors
708b2295c24SLucas Stach } while (pinned < npages);
709a8c21a54SThe etnaviv authors
710a8c21a54SThe etnaviv authors etnaviv_obj->pages = pvec;
711b2295c24SLucas Stach
712a8c21a54SThe etnaviv authors return 0;
713a8c21a54SThe etnaviv authors }
714a8c21a54SThe etnaviv authors
/*
 * etnaviv_gem_userptr_release() - undo get_pages: unmap and free the
 * scatterlist (if one was built) and unpin + free the page array.
 */
715a8c21a54SThe etnaviv authors static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
716a8c21a54SThe etnaviv authors {
717a8c21a54SThe etnaviv authors if (etnaviv_obj->sgt) {
718a8c21a54SThe etnaviv authors etnaviv_gem_scatterlist_unmap(etnaviv_obj);
719a8c21a54SThe etnaviv authors sg_free_table(etnaviv_obj->sgt);
720a8c21a54SThe etnaviv authors kfree(etnaviv_obj->sgt);
721a8c21a54SThe etnaviv authors }
722a8c21a54SThe etnaviv authors if (etnaviv_obj->pages) {
723a8c21a54SThe etnaviv authors int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
724a8c21a54SThe etnaviv authors
725c6f92f9fSMel Gorman release_pages(etnaviv_obj->pages, npages);
7262098105eSMichal Hocko kvfree(etnaviv_obj->pages);
727a8c21a54SThe etnaviv authors }
728a8c21a54SThe etnaviv authors }
729a8c21a54SThe etnaviv authors
/* userptr BOs cannot be mmapped through the GEM object; userspace already
 * has the pages mapped via the original user pointer. */
730a10e2bdeSLucas Stach static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
731a10e2bdeSLucas Stach struct vm_area_struct *vma)
732a10e2bdeSLucas Stach {
733a10e2bdeSLucas Stach return -EINVAL;
734a10e2bdeSLucas Stach }
735a10e2bdeSLucas Stach
/* (initializer continues on the next chunk line) */
736a8c21a54SThe etnaviv authors static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
737a8c21a54SThe etnaviv authors .get_pages = etnaviv_gem_userptr_get_pages,
738a8c21a54SThe etnaviv authors .release = etnaviv_gem_userptr_release,
739a0a5ab3eSLucas Stach .vmap =
etnaviv_gem_vmap_impl, 740a10e2bdeSLucas Stach .mmap = etnaviv_gem_userptr_mmap_obj, 741a8c21a54SThe etnaviv authors }; 742a8c21a54SThe etnaviv authors 743a8c21a54SThe etnaviv authors int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, 744a8c21a54SThe etnaviv authors uintptr_t ptr, u32 size, u32 flags, u32 *handle) 745a8c21a54SThe etnaviv authors { 746a8c21a54SThe etnaviv authors struct etnaviv_gem_object *etnaviv_obj; 747a8c21a54SThe etnaviv authors int ret; 748a8c21a54SThe etnaviv authors 749a8c21a54SThe etnaviv authors ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL, 750a8c21a54SThe etnaviv authors &etnaviv_gem_userptr_ops, &etnaviv_obj); 751a8c21a54SThe etnaviv authors if (ret) 752a8c21a54SThe etnaviv authors return ret; 753a8c21a54SThe etnaviv authors 754d6a8743dSLucas Stach lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class); 755d6a8743dSLucas Stach 756a8c21a54SThe etnaviv authors etnaviv_obj->userptr.ptr = ptr; 757b2295c24SLucas Stach etnaviv_obj->userptr.mm = current->mm; 758a8c21a54SThe etnaviv authors etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE); 759a8c21a54SThe etnaviv authors 76054f09288SLucas Stach etnaviv_gem_obj_add(dev, &etnaviv_obj->base); 761a8c21a54SThe etnaviv authors 762a8c21a54SThe etnaviv authors ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); 76354f09288SLucas Stach 764a8c21a54SThe etnaviv authors /* drop reference from allocate - handle holds it now */ 76523d1dd03SCihangir Akturk drm_gem_object_put_unlocked(&etnaviv_obj->base); 766a8c21a54SThe etnaviv authors return ret; 767a8c21a54SThe etnaviv authors } 768