/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

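/* Final teardown of a GEM object.  Any pin reference still held at this
 * point is a leak; force pin_refcnt down to a single reference so that
 * nouveau_bo_unpin() really unpins before the TTM object is released.
 */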
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo;

	if (!nvbo)
		return;
	bo = &nvbo->bo;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

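/* Called for each per-client open of the GEM object.  On chipsets with
 * a per-client virtual address space, make sure the buffer is mapped
 * (has a VMA) in that space; the mapping is refcounted per opener so
 * nouveau_gem_object_close() can drop it again.
 */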
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

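/* Drop the per-client VMA reference taken in nouveau_gem_object_open(),
 * unmapping the buffer from the client's address space on last close.
 */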
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

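/* Allocate a buffer object and wrap it in a GEM object.  The requested
 * GEM domains are translated to TTM placement flags; if no domain (or
 * only the CPU domain) is requested, the buffer falls back to system
 * memory.
 */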
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

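/* Fill in the userspace-visible description of a buffer: domain, size,
 * tiling, and the offset userspace should use (the per-client VMA
 * offset where a per-client VM exists, the raw TTM offset otherwise).
 */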
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

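/* GEM_NEW ioctl: allocate a buffer and return a handle plus its info.
 *
 * Illustrative userspace sketch only (hypothetical; exact libdrm
 * wrapper usage may differ):
 *
 *	struct drm_nouveau_gem_new req = { };
 *	req.info.size   = 4096;
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 *	drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_NEW, &req, sizeof(req));
 *	// on success, req.info.handle names the new buffer
 */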
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

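/* Pick a TTM placement for validation: prefer whichever requested
 * domain the buffer already resides in (to avoid a move), then VRAM,
 * then GART, while still listing every allowed domain as acceptable.
 */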
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

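/* Buffers are sorted onto three lists while reserved, keyed by which
 * domains they are allowed in, so validation can process VRAM-only,
 * GART-only and "either" buffers separately before fencing and
 * unreserving them all again in validate_fini().
 */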
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

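/* Reserve every buffer on the validation list.  Reservation uses a
 * global sequence number for deadlock avoidance: if a reservation
 * returns -EAGAIN we back off completely (dropping everything already
 * reserved), wait for the contended buffer to become free, and retry
 * from scratch.
 */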
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}

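/* Validate each reserved buffer: sync with its previous fence, place
 * it according to the request, and (pre-NV50) if it didn't end up where
 * userspace presumed, write the new location back to userspace and
 * count it so the relocation pass knows there is work to do.  Returns
 * the number of buffers needing relocations, or a negative error.
 */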
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		ret = nouveau_bo_validate(nvbo, true, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (dev_priv->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

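/* Copy a userspace array into a freshly allocated kernel buffer.  The
 * nmemb * size product is not overflow-checked here; callers are
 * expected to have bounded nmemb first, as the pushbuf ioctl does with
 * the NOUVEAU_GEM_MAX_* limits.
 */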
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

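/* Patch presumed-offset relocations into the push buffer contents.
 * NOUVEAU_GEM_RELOC_LOW/HIGH select the low or high 32 bits of
 * (presumed offset + r->data); NOUVEAU_GEM_RELOC_OR additionally ORs
 * in r->vor or r->tor depending on whether the target buffer ended up
 * in VRAM or GART.  Worked example, for a buffer presumed at
 * 0x123400000 with r->data == 0:
 *
 *	LOW  writes 0x23400000
 *	HIGH writes 0x00000001
 */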
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}

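/* GEM_PUSHBUF ioctl: the main command submission path.  Copies in the
 * push/buffer/reloc arrays, reserves and validates every buffer,
 * applies relocations if needed, then submits by one of three methods:
 * the IB ring (dma.ib_max), "call"-style commands (chipset >= 0x25),
 * or JUMPs into the user buffers on older chips; finally a fence is
 * emitted and attached to every buffer involved.
 */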
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Mark push buffers as being used on PFIFO, the validation code
	 * will then make sure that if the pushbuf bo moves, the move
	 * happens on the kernel channel, which will in turn cause a sync
	 * to happen before we try and submit the push buffer.
	 */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}

		bo[push[i].bo_index].read_domains |= (1 << 31);
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

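/* CPU_PREP ioctl: wait for the buffer to go idle before CPU access,
 * or just poll for idleness when NOUVEAU_GEM_CPU_PREP_NOWAIT is set.
 */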
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}