/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);

static const struct drm_gem_object_funcs radeon_gem_object_funcs;

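/*
 * GEM free callback: drop the radeon_bo reference backing the GEM object
 * and unregister any MMU notifier attached to a userptr BO.
 */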
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

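/**
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment in bytes, raised to at least PAGE_SIZE
 * @initial_domain: RADEON_GEM_DOMAIN_* mask the BO is first placed in
 * @flags: RADEON_GEM_* creation flags
 * @kernel: true for BOs created for kernel internal use
 * @obj: on success, set to the newly created GEM object
 *
 * VRAM allocations transparently fall back to GTT when VRAM is exhausted.
 * Returns 0 on success or a negative error code.
 */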
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

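/*
 * Work out which domain a set_domain request resolves to (the write domain
 * wins over the read domain). A CPU domain request just waits for the BO
 * to go idle; migrating a prime-shared BO to VRAM is rejected.
 */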
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

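/*
 * Per-device GEM state: init prepares the list of GEM objects, fini force
 * deletes any BOs that were leaked before the device goes away.
 */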
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and open
 * ioctl cases.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

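/*
 * Counterpart of radeon_gem_object_open: drop the per-VM reference taken
 * there and remove the VA mapping once the last reference is gone.
 */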
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

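/*
 * -EDEADLK from the BO code signals a GPU lockup. Try a GPU reset and, if
 * it succeeds, return -EAGAIN so userspace retries the ioctl.
 */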
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = radeon_gem_prime_vmap,
	.vunmap = radeon_gem_prime_vunmap,
};

/*
 * GEM ioctls.
 */
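/*
 * Report the VRAM and GTT sizes available to userspace, with currently
 * pinned memory subtracted from the visible VRAM and GTT totals.
 */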
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

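/*
 * Allocate a BO of the requested size, alignment and domain and return a
 * GEM handle for it. A lockup is converted to -EAGAIN so userspace retries.
 */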
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a GEM object to contain this BO */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

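/*
 * Turn an existing user memory range into a GEM object.
 * RADEON_GEM_USERPTR_READONLY maps the pages read-only (R600+ only),
 * ANONONLY rejects file-backed pages, VALIDATE binds the pages into the
 * GTT right away and REGISTER installs an MMU notifier to invalidate the
 * BO when the mapping changes. Writable mappings must set both ANONONLY
 * and REGISTER. A minimal userspace sketch (assuming libdrm's
 * drmCommandWriteRead(); addr and size must be page aligned):
 *
 *	struct drm_radeon_gem_userptr args = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.flags = RADEON_GEM_USERPTR_ANONONLY |
 *			 RADEON_GEM_USERPTR_REGISTER,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
 *			    &args, sizeof(args));
 *
 * On success args.handle holds the new GEM handle.
 */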
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a GEM object to contain this BO */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

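/*
 * Handle the set_domain ioctl: for now this only waits for the BO to be
 * idle when CPU access is requested, see radeon_gem_set_domain() above.
 */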
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

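/*
 * Translate a GEM handle into the fake offset userspace passes to mmap()
 * on the DRM fd. Userptr BOs are already CPU accessible and may not be
 * mapped this way.
 */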
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

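/*
 * Non-blocking busy check: returns -EBUSY while any fence on the BO's
 * reservation object is still unsignaled and reports the BO's current
 * placement domain.
 */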
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

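/*
 * Block (up to 30 seconds) until all fences on the BO have signaled, then
 * flush the HDP cache if the BO is resident in VRAM.
 */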
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

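/*
 * The tiling ioctls store and query the micro/macro tiling layout and
 * pitch userspace selected for a BO, which the CS checker later uses to
 * validate command streams touching it.
 */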
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

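/*
 * Manage per-process GPU virtual address mappings (Cayman+ with a working
 * VM manager). Only RADEON_VA_MAP and RADEON_VA_UNMAP are supported and
 * the result is reported back in args->operation. A minimal map sketch
 * (assuming libdrm; va_addr must lie above RADEON_VA_RESERVED_SIZE):
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = handle,
 *		.operation = RADEON_VA_MAP,
 *		.vm_id = 0,
 *		.flags = RADEON_VM_PAGE_READABLE |
 *			 RADEON_VM_PAGE_WRITEABLE,
 *		.offset = va_addr,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 */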
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can start using these fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

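/*
 * Get or set the initial placement domain of a BO, used by userspace to
 * tune where a buffer is first allocated. Not allowed on userptr BOs.
 */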
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

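/*
 * Dumb buffer allocation for unaccelerated scanout: pitch and size are
 * derived from width/height/bpp and the BO is placed in VRAM.
 */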
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

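/* debugfs: dump every GEM BO with its size, placement and owning pid. */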
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}