// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

/**
 * DOC: V3D GEM BO management support
 *
 * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the
 * GPU and the bus, allowing us to use shmem objects for our storage
 * instead of CMA.
 *
 * Physically contiguous objects may still be imported to V3D, but the
 * driver doesn't allocate physically contiguous objects on its own.
 * Display engines requiring physically contiguous allocations should
 * look into Mesa's "renderonly" support (as used by the Mesa pl111
 * driver) for an example of how to integrate with V3D.
 *
 * Long term, we should support evicting pages from the MMU when under
 * memory pressure (thus the v3d_bo_get_pages() refcounting), but
 * that's not a high priority since our systems tend to not have swap.
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
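/*
 * For orientation, a minimal sketch of how userspace typically exercises
 * the BO ioctls implemented below (DRM_IOCTL_V3D_CREATE_BO,
 * DRM_IOCTL_V3D_MMAP_BO, then mmap() on the DRM fd).  The ioctl numbers
 * and argument structs come from uapi/drm/v3d_drm.h; the device path and
 * the omitted error handling are assumptions of the example, not part of
 * this driver.
 *
 *	struct drm_v3d_create_bo create = { .size = 4096 };
 *	struct drm_v3d_mmap_bo map = { 0 };
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *
 * create.offset is the BO's address in the V3D (GPU) virtual address
 * space, while map.offset is only a token to pass to mmap() on the fd.
 */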
/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
 * it for DMA.
 */
static int
v3d_bo_get_pages(struct v3d_bo *bo)
{
	struct drm_gem_object *obj = &bo->base;
	struct drm_device *dev = obj->dev;
	int npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	mutex_lock(&bo->lock);
	if (bo->pages_refcount++ != 0)
		goto unlock;

	if (!obj->import_attach) {
		bo->pages = drm_gem_get_pages(obj);
		if (IS_ERR(bo->pages)) {
			ret = PTR_ERR(bo->pages);
			goto unlock;
		}

		bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
		if (IS_ERR(bo->sgt)) {
			ret = PTR_ERR(bo->sgt);
			goto put_pages;
		}

		/* Map the pages for use by the GPU. */
		dma_map_sg(dev->dev, bo->sgt->sgl,
			   bo->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
		if (!bo->pages) {
			ret = -ENOMEM;
			goto unlock;
		}

		drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
						 NULL, npages);

		/* Note that dma-bufs come in mapped. */
	}

	mutex_unlock(&bo->lock);

	return 0;

put_pages:
	drm_gem_put_pages(obj, bo->pages, true, true);
	bo->pages = NULL;
unlock:
	bo->pages_refcount--;
	mutex_unlock(&bo->lock);
	return ret;
}

static void
v3d_bo_put_pages(struct v3d_bo *bo)
{
	struct drm_gem_object *obj = &bo->base;

	mutex_lock(&bo->lock);
	if (--bo->pages_refcount == 0) {
		if (!obj->import_attach) {
			dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
				     bo->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(bo->sgt);
			kfree(bo->sgt);
			drm_gem_put_pages(obj, bo->pages, true, true);
		} else {
			kfree(bo->pages);
		}
	}
	mutex_unlock(&bo->lock);
}

/* Allocates the BO struct and reserves the BO's range in the V3D (GPU)
 * virtual address space; the MMU page tables are filled in later by
 * v3d_mmu_insert_ptes().
 */
static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
					   size_t unaligned_size)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_gem_object *obj;
	struct v3d_bo *bo;
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	int ret;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);
	obj = &bo->base;

	INIT_LIST_HEAD(&bo->vmas);
	INIT_LIST_HEAD(&bo->unref_head);
	mutex_init(&bo->lock);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto free_bo;

	spin_lock(&v3d->mm_lock);
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 obj->size >> PAGE_SHIFT,
					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		goto free_obj;

	return bo;

free_obj:
	drm_gem_object_release(obj);
free_bo:
	kfree(bo);
	return ERR_PTR(ret);
}

struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t unaligned_size)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_gem_object *obj;
	struct v3d_bo *bo;
	int ret;

	bo = v3d_bo_create_struct(dev, unaligned_size);
	if (IS_ERR(bo))
		return bo;
	obj = &bo->base;

	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	ret = v3d_bo_get_pages(bo);
	if (ret)
		goto free_mm;

	v3d_mmu_insert_ptes(bo);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated++;
	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	return bo;

free_mm:
	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	drm_gem_object_release(obj);
	kfree(bo);
	return ERR_PTR(ret);
}

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
void v3d_free_object(struct drm_gem_object *obj)
{
	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
	struct v3d_bo *bo = to_v3d_bo(obj);

	mutex_lock(&v3d->bo_lock);
	v3d->bo_stats.num_allocated--;
	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
	mutex_unlock(&v3d->bo_lock);

	reservation_object_fini(&bo->_resv);

	v3d_bo_put_pages(bo);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, bo->sgt);

	v3d_mmu_remove_ptes(bo);
	spin_lock(&v3d->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&v3d->mm_lock);

	mutex_destroy(&bo->lock);

	drm_gem_object_release(obj);
	kfree(bo);
}

struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
{
	struct v3d_bo *bo = to_v3d_bo(obj);

	return bo->resv;
}

/* Userspace mappings are write-combined and faulted in one page at a
 * time by v3d_gem_fault().
 */
static void
v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}

vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct v3d_bo *bo = to_v3d_bo(obj);
	pfn_t pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);

	return vmf_insert_mixed(vma, vmf->address, pfn);
}

int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	v3d_set_mmap_vma_flags(vma);

	return ret;
}

int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	v3d_set_mmap_vma_flags(vma);

	return 0;
}

struct sg_table *
v3d_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct v3d_bo *bo = to_v3d_bo(obj);
	int npages = obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(bo->pages, npages);
}

struct drm_gem_object *
v3d_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct v3d_bo *bo;

	bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
	if (IS_ERR(bo))
		return ERR_CAST(bo);
	obj = &bo->base;

	bo->resv = attach->dmabuf->resv;

	bo->sgt = sgt;
	obj->import_attach = attach;
	v3d_bo_get_pages(bo);

	v3d_mmu_insert_ptes(bo);

	return obj;
}
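/*
 * Also for orientation, a rough sketch of how the PRIME hooks above are
 * reached from userspace.  DRM_IOCTL_PRIME_HANDLE_TO_FD and
 * DRM_IOCTL_PRIME_FD_TO_HANDLE are the generic DRM ioctls from
 * include/uapi/drm/drm.h; "bo_handle", "render_fd" and "other_fd" are
 * placeholders for this example, and error handling is omitted.
 *
 *	struct drm_prime_handle prime = {
 *		.handle = bo_handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(render_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	(prime.fd is now a dma-buf fd backing the BO.)
 *
 *	prime.flags = 0;
 *	ioctl(other_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime);
 *	(prime.handle is a GEM handle for the imported buffer; for V3D the
 *	 import path ends up in v3d_prime_import_sg_table() above.)
 */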
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_v3d_create_bo *args = data;
	struct v3d_bo *bo = NULL;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown create_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	args->offset = bo->node.start << PAGE_SHIFT;

	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base);

	return ret;
}

int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_v3d_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}

int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_v3d_get_bo_offset *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_v3d_bo(gem_obj);

	args->offset = bo->node.start << PAGE_SHIFT;

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}