/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

static const struct drm_gem_object_funcs radeon_gem_object_funcs;

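/*
 * GEM .free callback: unregister any MMU notifier attached to the BO
 * and drop the last buffer object reference.
 */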
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

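/*
 * Allocate a GEM object backed by a radeon BO. The size is capped at
 * the unpinned GTT size (GTT is needed for VRAM<->system migrations),
 * and a failed VRAM allocation transparently falls back to VRAM|GTT.
 * On success *obj holds one reference to the new object.
 */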
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

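/*
 * Handle a userspace domain-transition request. Only the CPU domain
 * needs work here: wait up to 30 seconds for the BO to go idle so the
 * CPU can access it. Migrating a prime-shared BO to VRAM is refused
 * because it would break the attached dma-buf mappings.
 */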
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used by both the new and
 * the open ioctl paths.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

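/*
 * Counterpart of radeon_gem_object_open: drop this file's reference on
 * the VA mapping and remove it from the VM when the count hits zero.
 */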
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo (%d)\n",
			r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

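/*
 * -EDEADLK from the fence code signals a GPU lockup: attempt a reset
 * and, if that succeeds, return -EAGAIN so userspace retries the ioctl.
 */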
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
};

/*
 * GEM ioctls.
 */
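/*
 * Report VRAM and GART sizes to userspace, with currently pinned
 * memory subtracted from the usable figures.
 */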
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

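/*
 * Allocate a BO of the requested (page-rounded) size in the requested
 * domain and return a GEM handle to it. Runs under the read side of
 * exclusive_lock so it cannot race with a GPU reset.
 */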
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

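/*
 * Create a GEM object backed by user memory pages. Address and size
 * must be page aligned, and flag combinations that would let the GPU
 * write to pages we cannot track are rejected up front.
 */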
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain
	 */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now, if someone requests domain CPU just make sure
	 * the buffer is idle
	 */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

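/*
 * Look up the fake mmap offset of a BO so userspace can map it through
 * the DRM file descriptor. Userptr BOs are refused since their pages
 * are already part of the process address space.
 */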
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

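/*
 * Non-blocking busy query: report -EBUSY if any fence on the BO is
 * still pending, and return the BO's current memory domain.
 */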
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

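/*
 * Block for up to 30 seconds until all fences on the BO signal, then
 * flush the HDP cache if the BO is in VRAM so CPU reads are coherent.
 */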
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

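/*
 * Map or unmap a BO in this file's virtual address space. Offsets in
 * the reserved area, invalid flags and unknown operations are rejected;
 * args->operation is reused to carry the result back to userspace.
 */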
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-0 value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

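/*
 * Query or set a BO's initial (preferred) domain. Userptr BOs are
 * excluded, and stores are masked to the valid VRAM/GTT/CPU bits.
 */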
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

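/*
 * Dumb-buffer creation: compute a hardware-aligned pitch, round the
 * size up to whole pages and allocate the BO in VRAM for scanout.
 */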
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}