/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

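/* GEM object destructor.  Takes a runtime PM reference so teardown
 * runs with the device awake, destroys any dma-buf import state, then
 * drops the TTM reference backing the GEM object.
 */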
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

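/* Called for each client that opens a handle to the object.  On NV50+
 * every client gets a VMA for the buffer in its own (or the shared SVM)
 * address space; nouveau_vma_new() refcounts an existing mapping, so
 * repeated opens just take another reference.
 */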
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

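/* Tear down a client's VMA.  If the buffer is idle (no fence) it is
 * unmapped immediately; otherwise teardown is queued as client work
 * that runs once the fence signals.  If the work item can't be
 * allocated, fall back to a bounded synchronous wait.
 */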
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

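/* Called when a client closes its handle.  Drops the client's VMA
 * reference; the final reference unmaps under a runtime PM reference
 * so the device is awake while the mapping is torn down.
 */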
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

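/* Allocate a buffer object and initialise its embedded GEM object.
 * The caller receives a single GEM reference; dropping that reference
 * (rather than a raw TTM one) is what eventually frees the buffer.
 */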
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret;

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
	*pnvbo = nvbo;
	return 0;
}

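/* Fill in the userspace view of a buffer: current domain, GPU virtual
 * address (NV50+) or physical offset, mmap handle, size and tiling
 * state.
 */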
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

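/* DRM_NOUVEAU_GEM_NEW: allocate a buffer and return a handle to it.
 * A minimal userspace sketch (assuming libdrm and the uapi header
 * drm/nouveau_drm.h):
 *
 *	struct drm_nouveau_gem_new req = { 0 };
 *	req.info.size = 4096;
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_GART;
 *	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_NEW,
 *				  &req, sizeof(req));
 *	// on success, req.info.handle names the new object
 */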
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}

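/* Work out TTM placement for a buffer being validated: intersect what
 * the buffer allows with what the pushbuf entry asked for, preferring
 * wherever the buffer currently resides to avoid a needless migration.
 */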
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_domains = 0;

	if (!domains)
		return -EINVAL;

	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

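/* Unwind the validation list: attach the completion fence to each
 * buffer (and to its VMA on NV50+), drop any kmap taken for relocs,
 * then unreserve and release every object.
 */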
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

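/* Look up and reserve every buffer on the pushbuf list under a single
 * ww_acquire ticket.  On -EDEADLK all reservations are backed off, the
 * contended buffer is taken via the slowpath, and the loop restarts;
 * this is what makes multi-buffer reservation deadlock-free.
 */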
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}

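/* Validate each reserved buffer into an allowed placement and sync it
 * against the channel.  On pre-NV50 chips this also refreshes
 * userspace's presumed offsets; the return value is the number of
 * entries whose presumed state went stale, i.e. whether relocations
 * must be applied.
 */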
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

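/* Reserve and validate the pushbuf's buffer list.  *apply_relocs is
 * set when validate_list() reported stale presumed offsets.
 */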
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

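/* Copy a userspace array into a kvmalloc'd kernel buffer.  Callers are
 * expected to bound nmemb against the NOUVEAU_GEM_MAX_* limits first,
 * which keeps the size multiplication below from overflowing.
 */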
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

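/* Patch pushbuf contents through the CPU for buffers whose presumed
 * offset turned out to be stale.  Each write waits for the target
 * buffer to go idle first, so this path is slow and only taken when
 * necessary.  Consumes (frees) the reloc array in all cases.
 */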
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

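/* DRM_NOUVEAU_GEM_PUSHBUF: the main submission ioctl.  Validates the
 * buffer list, applies relocations where presumed offsets were stale,
 * submits each push entry via the channel's indirect buffer (ib), a
 * CALL (NV25+) or a JUMP, then attaches a fence to everything that was
 * validated.
 */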
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
		u_free(reloc);
	}
out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

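/* DRM_NOUVEAU_GEM_CPU_PREP: wait for pending GPU access and make the
 * buffer coherent for the CPU.  Waits on all fences when write access
 * is requested, otherwise only on the exclusive (write) fence; with
 * NOWAIT set this just polls and returns -EBUSY if still busy.
 */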
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}

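/* DRM_NOUVEAU_GEM_CPU_FINI: flush CPU writes back for device access;
 * pairs with CPU_PREP to bracket CPU access to non-coherent mappings.
 */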
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}

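/* DRM_NOUVEAU_GEM_INFO: query size, placement, map handle and tiling
 * state for an existing handle.
 */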
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}