1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <drm/drmP.h> 29 #include <drm/radeon_drm.h> 30 #include "radeon.h" 31 32 void radeon_gem_object_free(struct drm_gem_object *gobj) 33 { 34 struct radeon_bo *robj = gem_to_radeon_bo(gobj); 35 36 if (robj) { 37 if (robj->gem_base.import_attach) 38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); 39 radeon_bo_unref(&robj); 40 } 41 } 42 43 int radeon_gem_object_create(struct radeon_device *rdev, int size, 44 int alignment, int initial_domain, 45 bool discardable, bool kernel, 46 struct drm_gem_object **obj) 47 { 48 struct radeon_bo *robj; 49 unsigned long max_size; 50 int r; 51 52 *obj = NULL; 53 /* At least align on page size */ 54 if (alignment < PAGE_SIZE) { 55 alignment = PAGE_SIZE; 56 } 57 58 /* maximun bo size is the minimun btw visible vram and gtt size */ 59 max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); 60 if (size > max_size) { 61 printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n", 62 __func__, __LINE__, size >> 20, max_size >> 20); 63 return -ENOMEM; 64 } 65 66 retry: 67 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); 68 if (r) { 69 if (r != -ERESTARTSYS) { 70 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { 71 initial_domain |= RADEON_GEM_DOMAIN_GTT; 72 goto retry; 73 } 74 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 75 size, initial_domain, alignment, r); 76 } 77 return r; 78 } 79 *obj = &robj->gem_base; 80 robj->pid = task_pid_nr(current); 81 82 mutex_lock(&rdev->gem.mutex); 83 list_add_tail(&robj->list, &rdev->gem.objects); 84 mutex_unlock(&rdev->gem.mutex); 85 86 return 0; 87 } 88 89 static int radeon_gem_set_domain(struct drm_gem_object *gobj, 90 uint32_t rdomain, uint32_t wdomain) 91 { 92 struct radeon_bo *robj; 93 uint32_t domain; 94 int r; 95 96 /* FIXME: reeimplement */ 97 robj = gem_to_radeon_bo(gobj); 98 /* work out where to validate the buffer to */ 99 domain = wdomain; 100 if (!domain) { 101 domain = rdomain; 102 } 103 if (!domain) { 104 /* Do nothings */ 105 printk(KERN_WARNING "Set domain without domain !\n"); 106 return 0; 107 } 108 if (domain == RADEON_GEM_DOMAIN_CPU) { 109 /* Asking for cpu access wait for object idle */ 110 r = radeon_bo_wait(robj, NULL, false); 111 if (r) { 112 printk(KERN_ERR "Failed to wait for object !\n"); 113 return r; 114 } 115 } 116 return 0; 117 } 118 119 int radeon_gem_init(struct radeon_device *rdev) 120 { 121 INIT_LIST_HEAD(&rdev->gem.objects); 122 return 0; 123 } 124 125 void radeon_gem_fini(struct radeon_device *rdev) 126 { 127 radeon_bo_force_delete(rdev); 128 } 129 130 /* 131 * Call from drm_gem_handle_create which appear in both new and open ioctl 132 * case. 133 */ 134 int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 135 { 136 struct radeon_bo *rbo = gem_to_radeon_bo(obj); 137 struct radeon_device *rdev = rbo->rdev; 138 struct radeon_fpriv *fpriv = file_priv->driver_priv; 139 struct radeon_vm *vm = &fpriv->vm; 140 struct radeon_bo_va *bo_va; 141 int r; 142 143 if (rdev->family < CHIP_CAYMAN) { 144 return 0; 145 } 146 147 r = radeon_bo_reserve(rbo, false); 148 if (r) { 149 return r; 150 } 151 152 bo_va = radeon_vm_bo_find(vm, rbo); 153 if (!bo_va) { 154 bo_va = radeon_vm_bo_add(rdev, vm, rbo); 155 } else { 156 ++bo_va->ref_count; 157 } 158 radeon_bo_unreserve(rbo); 159 160 return 0; 161 } 162 163 void radeon_gem_object_close(struct drm_gem_object *obj, 164 struct drm_file *file_priv) 165 { 166 struct radeon_bo *rbo = gem_to_radeon_bo(obj); 167 struct radeon_device *rdev = rbo->rdev; 168 struct radeon_fpriv *fpriv = file_priv->driver_priv; 169 struct radeon_vm *vm = &fpriv->vm; 170 struct radeon_bo_va *bo_va; 171 int r; 172 173 if (rdev->family < CHIP_CAYMAN) { 174 return; 175 } 176 177 r = radeon_bo_reserve(rbo, true); 178 if (r) { 179 dev_err(rdev->dev, "leaking bo va because " 180 "we fail to reserve bo (%d)\n", r); 181 return; 182 } 183 bo_va = radeon_vm_bo_find(vm, rbo); 184 if (bo_va) { 185 if (--bo_va->ref_count == 0) { 186 radeon_vm_bo_rmv(rdev, bo_va); 187 } 188 } 189 radeon_bo_unreserve(rbo); 190 } 191 192 static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) 193 { 194 if (r == -EDEADLK) { 195 r = radeon_gpu_reset(rdev); 196 if (!r) 197 r = -EAGAIN; 198 } 199 return r; 200 } 201 202 /* 203 * GEM ioctls. 204 */ 205 int radeon_gem_info_ioctl(struct drm_device *dev, void *data, 206 struct drm_file *filp) 207 { 208 struct radeon_device *rdev = dev->dev_private; 209 struct drm_radeon_gem_info *args = data; 210 struct ttm_mem_type_manager *man; 211 unsigned i; 212 213 man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 214 215 args->vram_size = rdev->mc.real_vram_size; 216 args->vram_visible = (u64)man->size << PAGE_SHIFT; 217 if (rdev->stollen_vga_memory) 218 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 219 args->vram_visible -= radeon_fbdev_total_size(rdev); 220 args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; 221 for(i = 0; i < RADEON_NUM_RINGS; ++i) 222 args->gart_size -= rdev->ring[i].ring_size; 223 return 0; 224 } 225 226 int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, 227 struct drm_file *filp) 228 { 229 /* TODO: implement */ 230 DRM_ERROR("unimplemented %s\n", __func__); 231 return -ENOSYS; 232 } 233 234 int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, 235 struct drm_file *filp) 236 { 237 /* TODO: implement */ 238 DRM_ERROR("unimplemented %s\n", __func__); 239 return -ENOSYS; 240 } 241 242 int radeon_gem_create_ioctl(struct drm_device *dev, void *data, 243 struct drm_file *filp) 244 { 245 struct radeon_device *rdev = dev->dev_private; 246 struct drm_radeon_gem_create *args = data; 247 struct drm_gem_object *gobj; 248 uint32_t handle; 249 int r; 250 251 down_read(&rdev->exclusive_lock); 252 /* create a gem object to contain this object in */ 253 args->size = roundup(args->size, PAGE_SIZE); 254 r = radeon_gem_object_create(rdev, args->size, args->alignment, 255 args->initial_domain, false, 256 false, &gobj); 257 if (r) { 258 up_read(&rdev->exclusive_lock); 259 r = radeon_gem_handle_lockup(rdev, r); 260 return r; 261 } 262 r = drm_gem_handle_create(filp, gobj, &handle); 263 /* drop reference from allocate - handle holds it now */ 264 drm_gem_object_unreference_unlocked(gobj); 265 if (r) { 266 up_read(&rdev->exclusive_lock); 267 r = radeon_gem_handle_lockup(rdev, r); 268 return r; 269 } 270 args->handle = handle; 271 up_read(&rdev->exclusive_lock); 272 return 0; 273 } 274 275 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, 276 struct drm_file *filp) 277 { 278 /* transition the BO to a domain - 279 * just validate the BO into a certain domain */ 280 struct radeon_device *rdev = dev->dev_private; 281 struct drm_radeon_gem_set_domain *args = data; 282 struct drm_gem_object *gobj; 283 struct radeon_bo *robj; 284 int r; 285 286 /* for now if someone requests domain CPU - 287 * just make sure the buffer is finished with */ 288 down_read(&rdev->exclusive_lock); 289 290 /* just do a BO wait for now */ 291 gobj = drm_gem_object_lookup(dev, filp, args->handle); 292 if (gobj == NULL) { 293 up_read(&rdev->exclusive_lock); 294 return -ENOENT; 295 } 296 robj = gem_to_radeon_bo(gobj); 297 298 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 299 300 drm_gem_object_unreference_unlocked(gobj); 301 up_read(&rdev->exclusive_lock); 302 r = radeon_gem_handle_lockup(robj->rdev, r); 303 return r; 304 } 305 306 int radeon_mode_dumb_mmap(struct drm_file *filp, 307 struct drm_device *dev, 308 uint32_t handle, uint64_t *offset_p) 309 { 310 struct drm_gem_object *gobj; 311 struct radeon_bo *robj; 312 313 gobj = drm_gem_object_lookup(dev, filp, handle); 314 if (gobj == NULL) { 315 return -ENOENT; 316 } 317 robj = gem_to_radeon_bo(gobj); 318 *offset_p = radeon_bo_mmap_offset(robj); 319 drm_gem_object_unreference_unlocked(gobj); 320 return 0; 321 } 322 323 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 324 struct drm_file *filp) 325 { 326 struct drm_radeon_gem_mmap *args = data; 327 328 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); 329 } 330 331 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 332 struct drm_file *filp) 333 { 334 struct radeon_device *rdev = dev->dev_private; 335 struct drm_radeon_gem_busy *args = data; 336 struct drm_gem_object *gobj; 337 struct radeon_bo *robj; 338 int r; 339 uint32_t cur_placement = 0; 340 341 gobj = drm_gem_object_lookup(dev, filp, args->handle); 342 if (gobj == NULL) { 343 return -ENOENT; 344 } 345 robj = gem_to_radeon_bo(gobj); 346 r = radeon_bo_wait(robj, &cur_placement, true); 347 args->domain = radeon_mem_type_to_domain(cur_placement); 348 drm_gem_object_unreference_unlocked(gobj); 349 r = radeon_gem_handle_lockup(rdev, r); 350 return r; 351 } 352 353 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 354 struct drm_file *filp) 355 { 356 struct radeon_device *rdev = dev->dev_private; 357 struct drm_radeon_gem_wait_idle *args = data; 358 struct drm_gem_object *gobj; 359 struct radeon_bo *robj; 360 int r; 361 362 gobj = drm_gem_object_lookup(dev, filp, args->handle); 363 if (gobj == NULL) { 364 return -ENOENT; 365 } 366 robj = gem_to_radeon_bo(gobj); 367 r = radeon_bo_wait(robj, NULL, false); 368 /* callback hw specific functions if any */ 369 if (rdev->asic->ioctl_wait_idle) 370 robj->rdev->asic->ioctl_wait_idle(rdev, robj); 371 drm_gem_object_unreference_unlocked(gobj); 372 r = radeon_gem_handle_lockup(rdev, r); 373 return r; 374 } 375 376 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 377 struct drm_file *filp) 378 { 379 struct drm_radeon_gem_set_tiling *args = data; 380 struct drm_gem_object *gobj; 381 struct radeon_bo *robj; 382 int r = 0; 383 384 DRM_DEBUG("%d \n", args->handle); 385 gobj = drm_gem_object_lookup(dev, filp, args->handle); 386 if (gobj == NULL) 387 return -ENOENT; 388 robj = gem_to_radeon_bo(gobj); 389 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 390 drm_gem_object_unreference_unlocked(gobj); 391 return r; 392 } 393 394 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 395 struct drm_file *filp) 396 { 397 struct drm_radeon_gem_get_tiling *args = data; 398 struct drm_gem_object *gobj; 399 struct radeon_bo *rbo; 400 int r = 0; 401 402 DRM_DEBUG("\n"); 403 gobj = drm_gem_object_lookup(dev, filp, args->handle); 404 if (gobj == NULL) 405 return -ENOENT; 406 rbo = gem_to_radeon_bo(gobj); 407 r = radeon_bo_reserve(rbo, false); 408 if (unlikely(r != 0)) 409 goto out; 410 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 411 radeon_bo_unreserve(rbo); 412 out: 413 drm_gem_object_unreference_unlocked(gobj); 414 return r; 415 } 416 417 int radeon_gem_va_ioctl(struct drm_device *dev, void *data, 418 struct drm_file *filp) 419 { 420 struct drm_radeon_gem_va *args = data; 421 struct drm_gem_object *gobj; 422 struct radeon_device *rdev = dev->dev_private; 423 struct radeon_fpriv *fpriv = filp->driver_priv; 424 struct radeon_bo *rbo; 425 struct radeon_bo_va *bo_va; 426 u32 invalid_flags; 427 int r = 0; 428 429 if (!rdev->vm_manager.enabled) { 430 args->operation = RADEON_VA_RESULT_ERROR; 431 return -ENOTTY; 432 } 433 434 /* !! DONT REMOVE !! 435 * We don't support vm_id yet, to be sure we don't have have broken 436 * userspace, reject anyone trying to use non 0 value thus moving 437 * forward we can use those fields without breaking existant userspace 438 */ 439 if (args->vm_id) { 440 args->operation = RADEON_VA_RESULT_ERROR; 441 return -EINVAL; 442 } 443 444 if (args->offset < RADEON_VA_RESERVED_SIZE) { 445 dev_err(&dev->pdev->dev, 446 "offset 0x%lX is in reserved area 0x%X\n", 447 (unsigned long)args->offset, 448 RADEON_VA_RESERVED_SIZE); 449 args->operation = RADEON_VA_RESULT_ERROR; 450 return -EINVAL; 451 } 452 453 /* don't remove, we need to enforce userspace to set the snooped flag 454 * otherwise we will endup with broken userspace and we won't be able 455 * to enable this feature without adding new interface 456 */ 457 invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM; 458 if ((args->flags & invalid_flags)) { 459 dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", 460 args->flags, invalid_flags); 461 args->operation = RADEON_VA_RESULT_ERROR; 462 return -EINVAL; 463 } 464 if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) { 465 dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n"); 466 args->operation = RADEON_VA_RESULT_ERROR; 467 return -EINVAL; 468 } 469 470 switch (args->operation) { 471 case RADEON_VA_MAP: 472 case RADEON_VA_UNMAP: 473 break; 474 default: 475 dev_err(&dev->pdev->dev, "unsupported operation %d\n", 476 args->operation); 477 args->operation = RADEON_VA_RESULT_ERROR; 478 return -EINVAL; 479 } 480 481 gobj = drm_gem_object_lookup(dev, filp, args->handle); 482 if (gobj == NULL) { 483 args->operation = RADEON_VA_RESULT_ERROR; 484 return -ENOENT; 485 } 486 rbo = gem_to_radeon_bo(gobj); 487 r = radeon_bo_reserve(rbo, false); 488 if (r) { 489 args->operation = RADEON_VA_RESULT_ERROR; 490 drm_gem_object_unreference_unlocked(gobj); 491 return r; 492 } 493 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); 494 if (!bo_va) { 495 args->operation = RADEON_VA_RESULT_ERROR; 496 drm_gem_object_unreference_unlocked(gobj); 497 return -ENOENT; 498 } 499 500 switch (args->operation) { 501 case RADEON_VA_MAP: 502 if (bo_va->soffset) { 503 args->operation = RADEON_VA_RESULT_VA_EXIST; 504 args->offset = bo_va->soffset; 505 goto out; 506 } 507 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); 508 break; 509 case RADEON_VA_UNMAP: 510 r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0); 511 break; 512 default: 513 break; 514 } 515 args->operation = RADEON_VA_RESULT_OK; 516 if (r) { 517 args->operation = RADEON_VA_RESULT_ERROR; 518 } 519 out: 520 radeon_bo_unreserve(rbo); 521 drm_gem_object_unreference_unlocked(gobj); 522 return r; 523 } 524 525 int radeon_gem_op_ioctl(struct drm_device *dev, void *data, 526 struct drm_file *filp) 527 { 528 struct drm_radeon_gem_op *args = data; 529 struct drm_gem_object *gobj; 530 struct radeon_bo *robj; 531 int r; 532 533 gobj = drm_gem_object_lookup(dev, filp, args->handle); 534 if (gobj == NULL) { 535 return -ENOENT; 536 } 537 robj = gem_to_radeon_bo(gobj); 538 r = radeon_bo_reserve(robj, false); 539 if (unlikely(r)) 540 goto out; 541 542 switch (args->op) { 543 case RADEON_GEM_OP_GET_INITIAL_DOMAIN: 544 args->value = robj->initial_domain; 545 break; 546 case RADEON_GEM_OP_SET_INITIAL_DOMAIN: 547 robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | 548 RADEON_GEM_DOMAIN_GTT | 549 RADEON_GEM_DOMAIN_CPU); 550 break; 551 default: 552 r = -EINVAL; 553 } 554 555 radeon_bo_unreserve(robj); 556 out: 557 drm_gem_object_unreference_unlocked(gobj); 558 return r; 559 } 560 561 int radeon_mode_dumb_create(struct drm_file *file_priv, 562 struct drm_device *dev, 563 struct drm_mode_create_dumb *args) 564 { 565 struct radeon_device *rdev = dev->dev_private; 566 struct drm_gem_object *gobj; 567 uint32_t handle; 568 int r; 569 570 args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); 571 args->size = args->pitch * args->height; 572 args->size = ALIGN(args->size, PAGE_SIZE); 573 574 r = radeon_gem_object_create(rdev, args->size, 0, 575 RADEON_GEM_DOMAIN_VRAM, 576 false, ttm_bo_type_device, 577 &gobj); 578 if (r) 579 return -ENOMEM; 580 581 r = drm_gem_handle_create(file_priv, gobj, &handle); 582 /* drop reference from allocate - handle holds it now */ 583 drm_gem_object_unreference_unlocked(gobj); 584 if (r) { 585 return r; 586 } 587 args->handle = handle; 588 return 0; 589 } 590 591 #if defined(CONFIG_DEBUG_FS) 592 static int radeon_debugfs_gem_info(struct seq_file *m, void *data) 593 { 594 struct drm_info_node *node = (struct drm_info_node *)m->private; 595 struct drm_device *dev = node->minor->dev; 596 struct radeon_device *rdev = dev->dev_private; 597 struct radeon_bo *rbo; 598 unsigned i = 0; 599 600 mutex_lock(&rdev->gem.mutex); 601 list_for_each_entry(rbo, &rdev->gem.objects, list) { 602 unsigned domain; 603 const char *placement; 604 605 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type); 606 switch (domain) { 607 case RADEON_GEM_DOMAIN_VRAM: 608 placement = "VRAM"; 609 break; 610 case RADEON_GEM_DOMAIN_GTT: 611 placement = " GTT"; 612 break; 613 case RADEON_GEM_DOMAIN_CPU: 614 default: 615 placement = " CPU"; 616 break; 617 } 618 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", 619 i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20, 620 placement, (unsigned long)rbo->pid); 621 i++; 622 } 623 mutex_unlock(&rdev->gem.mutex); 624 return 0; 625 } 626 627 static struct drm_info_list radeon_debugfs_gem_list[] = { 628 {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL}, 629 }; 630 #endif 631 632 int radeon_gem_debugfs_init(struct radeon_device *rdev) 633 { 634 #if defined(CONFIG_DEBUG_FS) 635 return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1); 636 #endif 637 return 0; 638 } 639