// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
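
/*
 * CPU mappings follow the cache mode selected at allocation time:
 * ETNA_BO_WC maps write-combined, ETNA_BO_UNCACHED maps non-cached, and
 * ETNA_BO_CACHED falls through to a normal cacheable mapping backed by
 * the object's shmem file.  Cached BOs rely on etnaviv_gem_cpu_prep() and
 * etnaviv_gem_cpu_fini() further down for explicit cache maintenance.
 */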

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}
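
/*
 * GPU-side mappings are refcounted per (object, MMU context) pair:
 * etnaviv_gem_mapping_get() takes a reference on the mapping and on the
 * underlying GEM object, etnaviv_gem_mapping_unreference() drops both
 * again.  Dropping the last use does not unmap anything; unused mappings
 * are kept around so the MMU code can reap or re-use them later.
 * An illustrative (hypothetical) caller sequence:
 *
 *	mapping = etnaviv_gem_mapping_get(obj, mmu_context, va);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... hand the mapping's GPU address to the hardware ...
 *	etnaviv_gem_mapping_unreference(mapping);
 */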

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->context = etnaviv_iommu_context_get(mmu_context);
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
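
/*
 * Kernel vmappings are created lazily below and cached in
 * etnaviv_obj->vaddr; the default etnaviv_gem_vmap_impl() maps the pages
 * write-combined and the mapping is only torn down in the object's
 * release path (see etnaviv_gem_shmem_release() further down).
 */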

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}
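
/*
 * For ETNA_BO_CACHED objects, CPU access is expected to be bracketed by
 * the two helpers above so the cache maintenance they perform stays
 * balanced.  A minimal, hypothetical caller-side sketch (in practice this
 * is driven by the corresponding ioctls):
 *
 *	ret = etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	memcpy(vaddr, data, size);	// CPU writes the buffer contents
 *	etnaviv_gem_cpu_fini(obj);
 */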

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};
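
/*
 * Common allocation path shared by shmem-backed BOs (etnaviv_gem_new_handle())
 * and private/imported or userptr BOs (etnaviv_gem_new_private()): validate
 * the cache flags, initialise the per-object lock and vram_list, and wire up
 * the GEM object funcs.
 */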

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
					  pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}
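
/*
 * Userptr objects wrap existing user memory pinned with
 * pin_user_pages_fast().  They are always created ETNA_BO_CACHED (see
 * etnaviv_gem_new_userptr() below) and cannot be mapped again through the
 * GEM mmap offset, hence the -EINVAL mmap handler above.
 */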

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}