/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

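/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down a prime import if one is attached, unregisters any MMU
 * notifier and drops the reference to the underlying radeon_bo.
 */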
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

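/**
 * radeon_gem_object_create - allocate a GEM object backed by a radeon_bo
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM/GTT/CPU)
 * @flags: radeon BO creation flags
 * @kernel: whether the BO is for kernel internal use
 * @obj: where to store the new GEM object on success
 *
 * Allocations that fail in VRAM are retried with GTT added to the allowed
 * domains before giving up.
 */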
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

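/**
 * radeon_gem_set_domain - wait for a BO to be usable in the given domains
 *
 * @gobj: GEM object to operate on
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * For CPU access this just waits (up to 30 seconds) for the BO to become
 * idle; no actual migration is performed here.
 */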
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

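/**
 * radeon_gem_object_close - drop a file's reference to a BO's VM mapping
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private
 *
 * Counterpart to radeon_gem_object_open(); removes the bo_va once the last
 * handle in this VM goes away.
 */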
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we failed to reserve bo (%d)\n",
			r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

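/**
 * radeon_gem_handle_lockup - translate a lockup error into a reset + retry
 *
 * @rdev: radeon_device pointer
 * @r: error code from a GEM operation
 *
 * -EDEADLK signals a GPU lockup; attempt a reset and, if it succeeds, ask
 * the caller to retry with -EAGAIN.
 */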
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
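/**
 * radeon_gem_info_ioctl - report VRAM/GTT sizes to userspace
 *
 * Visible VRAM and GTT are reported with the currently pinned amounts
 * already subtracted.
 */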
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

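/**
 * radeon_gem_create_ioctl - create a GEM object on behalf of userspace
 *
 * The requested size is rounded up to a whole page before allocation.
 * On success args->handle names the new BO.
 *
 * A minimal userspace sketch, assuming libdrm's drmCommandWriteRead() and
 * an already opened device fd (illustrative only, not part of this file):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	int ret = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				      &req, sizeof(req));
 *	// on success, req.handle names the new buffer object
 */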
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this buffer */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

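/**
 * radeon_gem_userptr_ioctl - wrap anonymous user memory in a GEM object
 *
 * The address and size must be page aligned. Writable mappings must be
 * anonymous-only and registered with an MMU notifier so the pages can be
 * safely invalidated; read-only mappings are rejected on pre-R600 parts.
 */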
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this buffer */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

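/**
 * radeon_gem_set_domain_ioctl - validate a BO into a read/write domain
 *
 * For now this only waits for the BO to be idle when the CPU domain is
 * requested. Lockup handling deliberately uses the device pointer, since
 * the BO may already be gone once its GEM reference has been dropped.
 */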
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

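/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset of a BO
 *
 * Userptr BOs cannot be mapped through the GEM mmap path, so they are
 * rejected with -EPERM.
 */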
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

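/**
 * radeon_gem_busy_ioctl - non-blocking check whether a BO is still in use
 *
 * Returns -EBUSY if any fence on the BO's reservation object has not yet
 * signaled, and reports the BO's current placement domain either way.
 */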
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

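/**
 * radeon_gem_wait_idle_ioctl - wait (up to 30 seconds) for a BO to go idle
 *
 * A zero wait result maps to -EBUSY. If the BO currently lives in VRAM,
 * the HDP cache is flushed via MMIO afterwards so CPU reads see coherent
 * data.
 */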
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

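/**
 * radeon_gem_set_tiling_ioctl - set tiling flags and pitch for a BO
 */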
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

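/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file virtual address space
 *
 * A hypothetical userspace sketch for mapping a BO, assuming libdrm's
 * drmCommandWriteRead(), an existing bo_handle and a virtual_address chosen
 * by the caller (illustrative only, not part of this file):
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = bo_handle,
 *		.operation = RADEON_VA_MAP,
 *		.flags = RADEON_VM_PAGE_READABLE |
 *			 RADEON_VM_PAGE_WRITEABLE |
 *			 RADEON_VM_PAGE_SNOOPED,
 *		.offset = virtual_address,
 *	};
 *	int ret = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA,
 *				      &va, sizeof(va));
 */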
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can later use those fields without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

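/**
 * radeon_gem_op_ioctl - get or set per-BO state
 *
 * Supports querying and overriding a BO's initial placement domain;
 * userptr BOs are rejected with -EPERM.
 */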
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

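/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 *
 * Computes an aligned pitch from width and bpp, sizes the BO to whole
 * pages and allocates it in VRAM.
 */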
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

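/**
 * radeon_gem_debugfs_init - register the radeon_gem_info debugfs file
 */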
int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}