/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

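/* Lazily allocate the per-client ABI16 state on first use, constructing a
 * device object ("abi16Device") targeting the client's default device.
 * Returns NULL if allocation or device construction fails.
 */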
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_ctor(&cli->base.object, "abi16Device",
					     0, NV_DEVICE, &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}
	}
	return cli->abi16;
}

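/* Look up (or create) the client's ABI16 state and return it with
 * cli->mutex held on success; the caller releases the lock by passing the
 * result to nouveau_abi16_put().  Returns NULL on failure with the lock
 * already dropped.
 */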
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (nouveau_abi16(file_priv))
		return cli->abi16;
	mutex_unlock(&cli->mutex);
	return NULL;
}

int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	mutex_unlock(&cli->mutex);
	return ret;
}

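/* Map the device family to the software class exposed through ABI16;
 * returns 0x0000 for families without a matching software class.
 */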
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->client.device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return NVIF_CLASS_SW_NV04;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return NVIF_CLASS_SW_NV10;
	case NV_DEVICE_INFO_V0_TESLA:
		return NVIF_CLASS_SW_NV50;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
	case NV_DEVICE_INFO_V0_PASCAL:
	case NV_DEVICE_INFO_V0_VOLTA:
		return NVIF_CLASS_SW_GF100;
	}

	return 0x0000;
}

static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvif_object_dtor(&ntfy->object);
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

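/* Tear down a single ABI16 channel: stop its scheduler entity, idle the
 * hardware channel, release notifier objects and the notifier BO, and
 * finally destroy the channel object itself.
 */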
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* When a client exits without waiting for its queued up jobs to
	 * finish, it might happen that we fault the channel. This is due to
	 * drm_file_free() calling drm_gem_release() before the postclose()
	 * callback. Hence, we can't tear down this scheduler entity before
	 * uvmm mappings are unmapped. Currently, we can't detect this case.
	 *
	 * However, this should be rare and harmless, since the channel isn't
	 * needed anymore.
	 */
	nouveau_sched_entity_fini(&chan->sched_entity);

	/* wait for all activity to stop before cleaning up */
	if (chan->chan)
		nouveau_channel_idle(chan->chan);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_vma_del(&chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_put(&chan->ntfy->bo.base);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		nvif_object_dtor(&chan->ce);
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

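/* Destroy all remaining ABI16 channels and the device object, then free
 * and clear the client's ABI16 state.
 */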
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)abi16->device.object.client;
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nvif_device_dtor(&abi16->device);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

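/* If none of the older DMA channel classes match (i.e. the channel uses an
 * indirect buffer), report the NV50 IB limit, otherwise 0.
 */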
static inline int
getparam_dma_ib_max(struct nvif_device *device)
{
	const struct nvif_mclass dmas[] = {
		{ NV03_CHANNEL_DMA, 0 },
		{ NV10_CHANNEL_DMA, 0 },
		{ NV17_CHANNEL_DMA, 0 },
		{ NV40_CHANNEL_DMA, 0 },
		{}
	};

	return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
}

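/* DRM_NOUVEAU_GETPARAM: report a single device/driver property (chipset id,
 * PCI ids, bus type, memory sizes, timer, push limits, ...) selected by
 * getparam->param back to userspace.
 */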
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
	struct nvkm_gr *gr = nvxx_gr(device);
	struct drm_nouveau_getparam *getparam = data;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
			getparam->value = pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		switch (device->info.platform) {
		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
		default:
			WARN_ON(1);
			break;
		}
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = nvif_device_time(device);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = nvkm_gr_units(gr);
		break;
	case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
		int ib_max = getparam_dma_ib_max(device);

		getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
		break;
	}
	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
		getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
		break;
	case NOUVEAU_GETPARAM_VRAM_USED: {
		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
		break;
	}
	case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
		getparam->value = 1;
		break;
	default:
		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

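/* DRM_NOUVEAU_CHANNEL_ALLOC: create a GPU channel for the client, along
 * with its notifier buffer object and, where needed, a copy-engine
 * workaround object, and return the channel id, push buffer domains and
 * notifier handle to userspace.
 */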
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device;
	u64 engine, runm;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	/* If uvmm wasn't initialized until now, disable it completely to
	 * prevent userspace from mixing up UAPIs.
	 *
	 * The client lock is already acquired by nouveau_abi16_get().
	 */
	__nouveau_cli_disable_uvmm_noinit(cli);

	device = &abi16->device;
	engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle == ~0) {
			switch (init->tt_ctxdma_handle) {
			case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR    ; break;
			case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
			case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
			case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
			case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE    ; break;
			default:
				return nouveau_abi16_put(abi16, -ENOSYS);
			}

			init->fb_ctxdma_handle = 0;
			init->tt_ctxdma_handle = 0;
		}
	}

	if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
		runm = nvif_fifo_runlist(device, engine);
	else
		runm = nvif_fifo_runlist_ce(device);

	if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
					drm->sched_wq);
	if (ret)
		goto done;

	init->channel = chan->chan->chid;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Workaround "nvc0" gallium driver using classes it doesn't allocate on
	 * Kepler and above.  NVKM no longer always sets CE_CTX_VALID as part of
	 * channel init, now that we know what that stuff actually is.
	 *
	 * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
	 *
	 * Userspace was fixed prior to adding Ampere support.
	 */
	switch (device->info.family) {
	case NV_DEVICE_INFO_V0_VOLTA:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	case NV_DEVICE_INFO_V0_TURING:
		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
				       NULL, 0, &chan->ce);
		if (ret)
			goto done;
		break;
	default:
		break;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
				     false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
				      &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->chid == channel)
			return chan;
	}

	return NULL;
}

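/* Route a raw NVIF ioctl from userspace: only NEW, MTHD and SCLASS requests
 * are permitted, and the object they address is resolved from the token to
 * either an ABI16 channel or the client's device object.
 */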
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16 *abi16;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		switch (args->v0.type) {
		case NVIF_IOCTL_V0_NEW:
		case NVIF_IOCTL_V0_MTHD:
		case NVIF_IOCTL_V0_SCLASS:
			break;
		default:
			return -EACCES;
		}
	} else
		return ret;

	if (!(abi16 = nouveau_abi16(file_priv)))
		return -ENOMEM;

	if (args->v0.token != ~0ULL) {
		if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
			return -EINVAL;
		args->v0.object = nvif_handle(&chan->chan->user);
		args->v0.owner  = NVIF_IOCTL_V0_OWNER_ANY;
		return 0;
	}

	args->v0.object = nvif_handle(&abi16->device.object);
	args->v0.owner  = NVIF_IOCTL_V0_OWNER_ANY;
	return 0;
}

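/* DRM_NOUVEAU_CHANNEL_FREE: look up the channel named by userspace and tear
 * it down.
 */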
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

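/* DRM_NOUVEAU_GROBJ_ALLOC: allocate an engine object on a channel.  The
 * class requested by userspace is translated, where necessary, to the class
 * actually exposed by the device (software and video decode compatibility
 * cases) before the object is constructed.
 */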
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_client *client;
	struct nvif_sclass *sclass;
	s32 oclass = 0;
	int ret, i;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, init->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
	if (ret < 0)
		return nouveau_abi16_put(abi16, ret);

	if ((init->class & 0x00ff) == 0x006e) {
		/* nvsw: compatibility with older 0x*6e class identifier */
		for (i = 0; !oclass && i < ret; i++) {
			switch (sclass[i].oclass) {
			case NVIF_CLASS_SW_NV04:
			case NVIF_CLASS_SW_NV10:
			case NVIF_CLASS_SW_NV50:
			case NVIF_CLASS_SW_GF100:
				oclass = sclass[i].oclass;
				break;
			default:
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b1) {
		/* msvld: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
		/* mspdec: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else
	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
		/* msppp: compatibility with incorrect version exposure */
		for (i = 0; i < ret; i++) {
			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
				oclass = sclass[i].oclass;
				break;
			}
		}
	} else {
		oclass = init->class;
	}

	nvif_object_sclass_put(&sclass);
	if (!oclass)
		return nouveau_abi16_put(abi16, -EINVAL);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
			       oclass, NULL, 0, &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;

	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

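/* DRM_NOUVEAU_NOTIFIEROBJ_ALLOC: carve a region out of the channel's
 * notifier buffer and wrap it in a DMA object.  Only pre-Fermi chipsets
 * need this; newer ones get -EINVAL.
 */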
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device = &abi16->device;
	struct nvif_client *client;
	struct nv_dma_v0 args = {};
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);
	client = abi16->device.object.client;

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start += chan->ntfy_vma->addr;
		args.limit += chan->ntfy_vma->addr;
	} else
	if (drm->agp.bridge) {
		args.target = NV_DMA_V0_TARGET_AGP;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->offset;
		args.limit += drm->agp.base + chan->ntfy->offset;
	} else {
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_RDWR;
		args.start += chan->ntfy->offset;
		args.limit += chan->ntfy->offset;
	}

	client->route = NVDRM_OBJECT_ABI16;
	ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
			       NV_DMA_IN_MEMORY, &args, sizeof(args),
			       &ntfy->object);
	client->route = NVDRM_OBJECT_NVIF;
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;
done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

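/* DRM_NOUVEAU_GPUOBJ_FREE: idle the channel, then destroy the engine or
 * notifier object matching the given handle.
 */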
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->object.handle == fini->handle) {
			nouveau_abi16_ntfy_fini(chan, ntfy);
			ret = 0;
			break;
		}
	}

	return nouveau_abi16_put(abi16, ret);
}