/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

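/*
 * Final destructor for a GEM object, called once its last reference is
 * dropped.  The device is woken via runtime PM because destroying the
 * backing TTM object may need to touch the hardware.
 */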
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

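/*
 * Called each time a client (drm_file) opens a handle to this object.
 * On chipsets with per-client address spaces, map the buffer into the
 * client's VM, or take another reference on an existing mapping.
 */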
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

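/*
 * Deferred VMA teardown, run from a fence callback once the GPU has
 * finished with the mapping (see nouveau_gem_object_unmap()).
 */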
static void
nouveau_gem_object_delete(void *data)
{
	struct nvkm_vma *vma = data;
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
	kfree(vma);
}

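/*
 * Drop a client's VMA.  If the buffer is still busy on the GPU, defer
 * the unmap to a fence callback; otherwise tear it down immediately.
 */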
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}

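/*
 * Called when a client closes its handle: drop the reference on the
 * client's VMA and unmap it once the refcount reaches zero.
 */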
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

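/*
 * Allocate a buffer object and initialise the GEM object embedded in
 * it.  The caller receives a single GEM reference rather than a
 * separate nouveau_bo TTM reference.
 */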
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without breaking the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

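/*
 * Fill in a drm_nouveau_gem_info reply for a buffer: domains, size,
 * mmap offset and tiling state.  For clients with a VM, the reported
 * offset is the buffer's virtual address in that VM.
 */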
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nvkm_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

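/*
 * GEM_NEW ioctl: validate the requested tile flags, allocate a buffer
 * and return a handle to it.
 */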
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

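/*
 * Derive a TTM placement from the domains requested for a pushbuf
 * buffer.  Write domains take priority over read domains, and the
 * buffer's current placement is preferred when it is already valid,
 * to avoid needless migration.
 */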
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

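/*
 * Unwind the validation list: attach the fence (if any) to each
 * buffer, drop any kmap left over from reloc patching, unreserve and
 * drop the GEM references taken by validate_init().
 */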
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

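/*
 * Look up and reserve every buffer on the pushbuf list.  Reservation
 * uses a ww_acquire ticket so that on -EDEADLK against another thread
 * we can drop everything, reserve the contended buffer via the
 * slowpath, and retry the whole list.
 */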
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret = 0, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}

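/*
 * Validate each reserved buffer into an allowed placement and order it
 * against work already queued on the channel.  Returns the number of
 * buffers whose presumed offset went stale (i.e. how many relocations
 * still need to be applied), or a negative error code.
 */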
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

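/*
 * Reserve and validate the complete buffer list for a pushbuf
 * submission.  On success, *apply_relocs holds the number of buffers
 * with stale presumed offsets.
 */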
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

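/*
 * Copy a userspace array into a kernel allocation, falling back to
 * vmalloc for large lists.  All callers bound nmemb with one of the
 * NOUVEAU_GEM_MAX_* limits, so the size multiplication cannot
 * overflow.
 */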
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

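/*
 * Apply userspace-supplied relocations: once validation has placed
 * every buffer, patch 32-bit words inside the push buffers with the
 * final offsets of the buffers they reference.
 */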
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

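/*
 * GEM_PUSHBUF ioctl: the command submission path.  Copy in the push,
 * buffer and reloc lists, validate and fence every buffer, apply any
 * needed relocations, then submit the push buffers to the channel.
 */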
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

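	/* Submission proper.  Three paths: IB-capable channels get one
	 * indirect-buffer entry per push; nv25+ "calls" into the user
	 * buffer; older chips jump into it, so the tail of each push
	 * must be patched with a jump back to the main ring. */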
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

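	/* Tell userspace what to append to its next pushbuf so that it
	 * chains correctly into the ring for this chipset. */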
out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

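/*
 * CPU_PREP ioctl: wait (or, with NOWAIT, poll) for the buffer to
 * become idle for CPU access, then sync its pages for the CPU.
 */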
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait) {
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	} else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}

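/*
 * CPU_FINI ioctl: hand the buffer back to the device after CPU access.
 */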
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_unreference_unlocked(gem);
	return 0;
}

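/*
 * GEM_INFO ioctl: look up a handle and report the buffer's current
 * state.
 */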
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
926