1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <nvif/client.h>
25 #include <nvif/driver.h>
26 #include <nvif/ioctl.h>
27 #include <nvif/class.h>
28 
29 #include "nouveau_drm.h"
30 #include "nouveau_dma.h"
31 #include "nouveau_gem.h"
32 #include "nouveau_chan.h"
33 #include "nouveau_abi16.h"
34 
/* Return (lazily allocating on first use) the per-client abi16 state.
 *
 * On success this returns with cli->mutex HELD; the caller must release
 * it via nouveau_abi16_put().  On failure (allocation or device object
 * creation failed) the mutex is dropped and NULL is returned.
 */
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_init(&cli->base.base, NULL,
					     NOUVEAU_ABI16_DEVICE, NV_DEVICE,
					     &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;	/* mutex stays held */

			/* device object creation failed, undo allocation */
			kfree(cli->abi16);
			cli->abi16 = NULL;
		}

		/* failure path only: drop the lock before returning NULL */
		mutex_unlock(&cli->mutex);
	}
	return cli->abi16;
}
68 
69 int
70 nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
71 {
72 	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
73 	mutex_unlock(&cli->mutex);
74 	return ret;
75 }
76 
77 u16
78 nouveau_abi16_swclass(struct nouveau_drm *drm)
79 {
80 	switch (drm->device.info.family) {
81 	case NV_DEVICE_INFO_V0_TNT:
82 		return 0x006e;
83 	case NV_DEVICE_INFO_V0_CELSIUS:
84 	case NV_DEVICE_INFO_V0_KELVIN:
85 	case NV_DEVICE_INFO_V0_RANKINE:
86 	case NV_DEVICE_INFO_V0_CURIE:
87 		return 0x016e;
88 	case NV_DEVICE_INFO_V0_TESLA:
89 		return 0x506e;
90 	case NV_DEVICE_INFO_V0_FERMI:
91 	case NV_DEVICE_INFO_V0_KEPLER:
92 	case NV_DEVICE_INFO_V0_MAXWELL:
93 		return 0x906e;
94 	}
95 
96 	return 0x0000;
97 }
98 
99 static void
100 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
101 			struct nouveau_abi16_ntfy *ntfy)
102 {
103 	nvkm_mm_free(&chan->heap, &ntfy->node);
104 	list_del(&ntfy->head);
105 	kfree(ntfy);
106 }
107 
/* Destroy an abi16 channel and everything hanging off it: notifier
 * allocations, the notifier buffer object, the heap, and the channel
 * object itself.  Also returns the channel's handle bit to abi16->handles
 * so the slot can be reused.  Called with cli->mutex held (via the ioctl
 * paths) or during client teardown.
 */
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* wait for all activity to stop before releasing notify object, which
	 * may be still in use */
	if (chan->chan && chan->ntfy)
		nouveau_channel_idle(chan->chan);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	/* unmap, unpin and drop the final GEM reference on the notifier BO */
	if (chan->ntfy) {
		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
	}

	/* a non-zero block_size indicates nvkm_mm_init() ran for this
	 * channel (see channel_alloc) — presumably safe as an "initialised"
	 * flag; confirm against nvkm_mm_init */
	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		/* give the handle bit back so the slot can be reallocated */
		abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}
142 
143 void
144 nouveau_abi16_fini(struct nouveau_abi16 *abi16)
145 {
146 	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
147 	struct nouveau_abi16_chan *chan, *temp;
148 
149 	/* cleanup channels */
150 	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
151 		nouveau_abi16_chan_fini(abi16, chan);
152 	}
153 
154 	/* destroy the device object */
155 	nvif_device_fini(&abi16->device);
156 
157 	kfree(cli->abi16);
158 	cli->abi16 = NULL;
159 }
160 
161 int
162 nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
163 {
164 	struct nouveau_cli *cli = nouveau_cli(file_priv);
165 	struct nouveau_drm *drm = nouveau_drm(dev);
166 	struct nvif_device *device = &drm->device;
167 	struct nvkm_gr *gr = nvxx_gr(device);
168 	struct drm_nouveau_getparam *getparam = data;
169 
170 	switch (getparam->param) {
171 	case NOUVEAU_GETPARAM_CHIPSET_ID:
172 		getparam->value = device->info.chipset;
173 		break;
174 	case NOUVEAU_GETPARAM_PCI_VENDOR:
175 		if (nv_device_is_pci(nvxx_device(device)))
176 			getparam->value = dev->pdev->vendor;
177 		else
178 			getparam->value = 0;
179 		break;
180 	case NOUVEAU_GETPARAM_PCI_DEVICE:
181 		if (nv_device_is_pci(nvxx_device(device)))
182 			getparam->value = dev->pdev->device;
183 		else
184 			getparam->value = 0;
185 		break;
186 	case NOUVEAU_GETPARAM_BUS_TYPE:
187 		if (!nv_device_is_pci(nvxx_device(device)))
188 			getparam->value = 3;
189 		else
190 		if (drm_pci_device_is_agp(dev))
191 			getparam->value = 0;
192 		else
193 		if (!pci_is_pcie(dev->pdev))
194 			getparam->value = 1;
195 		else
196 			getparam->value = 2;
197 		break;
198 	case NOUVEAU_GETPARAM_FB_SIZE:
199 		getparam->value = drm->gem.vram_available;
200 		break;
201 	case NOUVEAU_GETPARAM_AGP_SIZE:
202 		getparam->value = drm->gem.gart_available;
203 		break;
204 	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
205 		getparam->value = 0; /* deprecated */
206 		break;
207 	case NOUVEAU_GETPARAM_PTIMER_TIME:
208 		getparam->value = nvif_device_time(device);
209 		break;
210 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
211 		getparam->value = 1;
212 		break;
213 	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
214 		getparam->value = 1;
215 		break;
216 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
217 		getparam->value = gr->units ? gr->units(gr) : 0;
218 		break;
219 	default:
220 		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
221 		return -EINVAL;
222 	}
223 
224 	return 0;
225 }
226 
/* SETPARAM ioctl: not supported by this driver; every request is
 * rejected with -EINVAL.
 */
int
nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
{
	return -EINVAL;
}
232 
233 int
234 nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
235 {
236 	struct drm_nouveau_channel_alloc *init = data;
237 	struct nouveau_cli *cli = nouveau_cli(file_priv);
238 	struct nouveau_drm *drm = nouveau_drm(dev);
239 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
240 	struct nouveau_abi16_chan *chan;
241 	struct nvif_device *device;
242 	int ret;
243 
244 	if (unlikely(!abi16))
245 		return -ENOMEM;
246 
247 	if (!drm->channel)
248 		return nouveau_abi16_put(abi16, -ENODEV);
249 
250 	device = &abi16->device;
251 
252 	/* hack to allow channel engine type specification on kepler */
253 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
254 		if (init->fb_ctxdma_handle != ~0)
255 			init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
256 		else
257 			init->fb_ctxdma_handle = init->tt_ctxdma_handle;
258 
259 		/* allow flips to be executed if this is a graphics channel */
260 		init->tt_ctxdma_handle = 0;
261 		if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
262 			init->tt_ctxdma_handle = 1;
263 	}
264 
265 	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
266 		return nouveau_abi16_put(abi16, -EINVAL);
267 
268 	/* allocate "abi16 channel" data and make up a handle for it */
269 	init->channel = __ffs64(~abi16->handles);
270 	if (~abi16->handles == 0)
271 		return nouveau_abi16_put(abi16, -ENOSPC);
272 
273 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
274 	if (!chan)
275 		return nouveau_abi16_put(abi16, -ENOMEM);
276 
277 	INIT_LIST_HEAD(&chan->notifiers);
278 	list_add(&chan->head, &abi16->channels);
279 	abi16->handles |= (1ULL << init->channel);
280 
281 	/* create channel object and initialise dma and fence management */
282 	ret = nouveau_channel_new(drm, device,
283 				  NOUVEAU_ABI16_CHAN(init->channel),
284 				  init->fb_ctxdma_handle,
285 				  init->tt_ctxdma_handle, &chan->chan);
286 	if (ret)
287 		goto done;
288 
289 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
290 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
291 					NOUVEAU_GEM_DOMAIN_GART;
292 	else
293 	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
294 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
295 	else
296 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
297 
298 	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
299 		init->subchan[0].handle = 0x00000000;
300 		init->subchan[0].grclass = 0x0000;
301 		init->subchan[1].handle = chan->chan->nvsw.handle;
302 		init->subchan[1].grclass = 0x506e;
303 		init->nr_subchan = 2;
304 	}
305 
306 	/* Named memory object area */
307 	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
308 			      0, 0, &chan->ntfy);
309 	if (ret == 0)
310 		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
311 	if (ret)
312 		goto done;
313 
314 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
315 		ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
316 					&chan->ntfy_vma);
317 		if (ret)
318 			goto done;
319 	}
320 
321 	ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
322 				    &init->notifier_handle);
323 	if (ret)
324 		goto done;
325 
326 	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
327 done:
328 	if (ret)
329 		nouveau_abi16_chan_fini(abi16, chan);
330 	return nouveau_abi16_put(abi16, ret);
331 }
332 
333 static struct nouveau_abi16_chan *
334 nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
335 {
336 	struct nouveau_abi16_chan *chan;
337 
338 	list_for_each_entry(chan, &abi16->channels, head) {
339 		if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
340 			return chan;
341 	}
342 
343 	return NULL;
344 }
345 
346 int
347 nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
348 {
349 	struct drm_nouveau_channel_free *req = data;
350 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
351 	struct nouveau_abi16_chan *chan;
352 
353 	if (unlikely(!abi16))
354 		return -ENOMEM;
355 
356 	chan = nouveau_abi16_chan(abi16, req->channel);
357 	if (!chan)
358 		return nouveau_abi16_put(abi16, -ENOENT);
359 	nouveau_abi16_chan_fini(abi16, chan);
360 	return nouveau_abi16_put(abi16, 0);
361 }
362 
363 int
364 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
365 {
366 	struct drm_nouveau_grobj_alloc *init = data;
367 	struct {
368 		struct nvif_ioctl_v0 ioctl;
369 		struct nvif_ioctl_new_v0 new;
370 	} args = {
371 		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
372 		.ioctl.type = NVIF_IOCTL_V0_NEW,
373 		.ioctl.path_nr = 3,
374 		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
375 		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
376 		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
377 		.new.route = NVDRM_OBJECT_ABI16,
378 		.new.handle = init->handle,
379 		.new.oclass = init->class,
380 	};
381 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
382 	struct nouveau_drm *drm = nouveau_drm(dev);
383 	struct nvif_client *client;
384 	int ret;
385 
386 	if (unlikely(!abi16))
387 		return -ENOMEM;
388 
389 	if (init->handle == ~0)
390 		return nouveau_abi16_put(abi16, -EINVAL);
391 	client = nvif_client(nvif_object(&abi16->device));
392 
393 	/* compatibility with userspace that assumes 506e for all chipsets */
394 	if (init->class == 0x506e) {
395 		init->class = nouveau_abi16_swclass(drm);
396 		if (init->class == 0x906e)
397 			return nouveau_abi16_put(abi16, 0);
398 	}
399 
400 	ret = nvif_client_ioctl(client, &args, sizeof(args));
401 	return nouveau_abi16_put(abi16, ret);
402 }
403 
404 int
405 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
406 {
407 	struct drm_nouveau_notifierobj_alloc *info = data;
408 	struct {
409 		struct nvif_ioctl_v0 ioctl;
410 		struct nvif_ioctl_new_v0 new;
411 		struct nv_dma_v0 ctxdma;
412 	} args = {
413 		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
414 		.ioctl.type = NVIF_IOCTL_V0_NEW,
415 		.ioctl.path_nr = 3,
416 		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
417 		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
418 		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
419 		.new.route = NVDRM_OBJECT_ABI16,
420 		.new.handle = info->handle,
421 		.new.oclass = NV_DMA_IN_MEMORY,
422 	};
423 	struct nouveau_drm *drm = nouveau_drm(dev);
424 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
425 	struct nouveau_abi16_chan *chan;
426 	struct nouveau_abi16_ntfy *ntfy;
427 	struct nvif_device *device = &abi16->device;
428 	struct nvif_client *client;
429 	int ret;
430 
431 	if (unlikely(!abi16))
432 		return -ENOMEM;
433 
434 	/* completely unnecessary for these chipsets... */
435 	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
436 		return nouveau_abi16_put(abi16, -EINVAL);
437 	client = nvif_client(nvif_object(&abi16->device));
438 
439 	chan = nouveau_abi16_chan(abi16, info->channel);
440 	if (!chan)
441 		return nouveau_abi16_put(abi16, -ENOENT);
442 
443 	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
444 	if (!ntfy)
445 		return nouveau_abi16_put(abi16, -ENOMEM);
446 
447 	list_add(&ntfy->head, &chan->notifiers);
448 	ntfy->handle = info->handle;
449 
450 	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
451 			   &ntfy->node);
452 	if (ret)
453 		goto done;
454 
455 	args.ctxdma.start = ntfy->node->offset;
456 	args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
457 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
458 		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
459 		args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
460 		args.ctxdma.start += chan->ntfy_vma.offset;
461 		args.ctxdma.limit += chan->ntfy_vma.offset;
462 	} else
463 	if (drm->agp.stat == ENABLED) {
464 		args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
465 		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
466 		args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
467 		args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
468 		client->super = true;
469 	} else {
470 		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
471 		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
472 		args.ctxdma.start += chan->ntfy->bo.offset;
473 		args.ctxdma.limit += chan->ntfy->bo.offset;
474 	}
475 
476 	ret = nvif_client_ioctl(client, &args, sizeof(args));
477 	client->super = false;
478 	if (ret)
479 		goto done;
480 
481 	info->offset = ntfy->node->offset;
482 
483 done:
484 	if (ret)
485 		nouveau_abi16_ntfy_fini(chan, ntfy);
486 	return nouveau_abi16_put(abi16, ret);
487 }
488 
489 int
490 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
491 {
492 	struct drm_nouveau_gpuobj_free *fini = data;
493 	struct {
494 		struct nvif_ioctl_v0 ioctl;
495 		struct nvif_ioctl_del del;
496 	} args = {
497 		.ioctl.owner = NVDRM_OBJECT_ABI16,
498 		.ioctl.type = NVIF_IOCTL_V0_DEL,
499 		.ioctl.path_nr = 4,
500 		.ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
501 		.ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
502 		.ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
503 		.ioctl.path[0] = fini->handle,
504 	};
505 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
506 	struct nouveau_abi16_chan *chan;
507 	struct nouveau_abi16_ntfy *ntfy;
508 	struct nvif_client *client;
509 	int ret;
510 
511 	if (unlikely(!abi16))
512 		return -ENOMEM;
513 
514 	chan = nouveau_abi16_chan(abi16, fini->channel);
515 	if (!chan)
516 		return nouveau_abi16_put(abi16, -ENOENT);
517 	client = nvif_client(nvif_object(&abi16->device));
518 
519 	/* synchronize with the user channel and destroy the gpu object */
520 	nouveau_channel_idle(chan->chan);
521 
522 	ret = nvif_client_ioctl(client, &args, sizeof(args));
523 	if (ret)
524 		return nouveau_abi16_put(abi16, ret);
525 
526 	/* cleanup extra state if this object was a notifier */
527 	list_for_each_entry(ntfy, &chan->notifiers, head) {
528 		if (ntfy->handle == fini->handle) {
529 			nvkm_mm_free(&chan->heap, &ntfy->node);
530 			list_del(&ntfy->head);
531 			break;
532 		}
533 	}
534 
535 	return nouveau_abi16_put(abi16, 0);
536 }
537