xref: /openbmc/linux/drivers/gpu/drm/tegra/drm.c (revision 4f3db074)
1 /*
2  * Copyright (C) 2012 Avionic Design GmbH
3  * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  */
9 
10 #include <linux/host1x.h>
11 #include <linux/iommu.h>
12 
13 #include <drm/drm_atomic.h>
14 #include <drm/drm_atomic_helper.h>
15 
16 #include "drm.h"
17 #include "gem.h"
18 
19 #define DRIVER_NAME "tegra"
20 #define DRIVER_DESC "NVIDIA Tegra graphics"
21 #define DRIVER_DATE "20120330"
22 #define DRIVER_MAJOR 0
23 #define DRIVER_MINOR 0
24 #define DRIVER_PATCHLEVEL 0
25 
/*
 * Per-file (per open()) driver state: tracks the channel contexts created
 * via DRM_IOCTL_TEGRA_OPEN_CHANNEL so they can be torn down in ->preclose().
 */
struct tegra_drm_file {
	struct list_head contexts;
};
29 
/*
 * Queue an asynchronous (page-flip style) atomic commit. The pending state
 * must be stored before the work is scheduled so that tegra_atomic_work()
 * sees it; serialization against earlier commits is the caller's job (see
 * tegra_atomic_commit(), which holds tegra->commit.lock and flushes the
 * work before scheduling a new one).
 */
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}
36 
/*
 * Program the hardware from an already swapped-in atomic state: disable
 * outgoing CRTCs/encoders, update the planes, enable incoming
 * CRTCs/encoders, wait for vblank, then release the old framebuffers and
 * free the state. The call order of the helpers below is part of the
 * atomic-helper contract and must not be changed.
 */
static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}
67 
/*
 * Worker callback for asynchronous commits: completes the state that
 * tegra_atomic_schedule() stashed in tegra->commit.state.
 */
static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}
75 
/*
 * drm_mode_config_funcs.atomic_commit implementation. Prepares the planes,
 * swaps the new state in on the software side and then either hands the
 * hardware programming to the commit worker (async) or performs it
 * synchronously. Returns 0 on success or a negative errno from plane
 * preparation.
 */
static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool async)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding asynchronous commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(drm, state);

	if (async)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}
106 
/* Mode-config callbacks: FB creation, fbdev hotplug and atomic check/commit. */
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_TEGRA_FBDEV
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};
115 
/*
 * drm_driver.load implementation: allocates the per-device tegra_drm
 * structure, optionally sets up an IOMMU domain with an IOVA allocator,
 * initializes mode-config/vblank/fbdev state and brings up the host1x
 * subdevices. The error labels unwind in the exact reverse order of setup;
 * keep them in sync when adding steps.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		DRM_DEBUG("IOMMU context initialized\n");
		/* IOVA space for buffer mappings: 0..2 GiB */
		drm_mm_init(&tegra->mm, 0, SZ_2G);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	/* probes and binds all host1x subdevices (DCs, outputs, engines) */
	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	drm_mode_config_reset(drm);

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->vblank_disable_immediate = true;
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto vblank;

	return 0;

vblank:
	drm_vblank_cleanup(drm);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
	}
free:
	kfree(tegra);
	return err;
}
207 
/*
 * drm_driver.unload implementation: tears down KMS state, the host1x
 * subdevices and the IOMMU domain, then frees the per-device structure.
 *
 * NOTE(review): if host1x_device_exit() fails we return with the KMS state
 * already destroyed but tegra/domain still allocated — presumably the
 * device is considered unusable at that point; confirm against the host1x
 * bus code before changing this.
 */
static int tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return err;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
	}

	kfree(tegra);

	return 0;
}
232 
233 static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
234 {
235 	struct tegra_drm_file *fpriv;
236 
237 	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
238 	if (!fpriv)
239 		return -ENOMEM;
240 
241 	INIT_LIST_HEAD(&fpriv->contexts);
242 	filp->driver_priv = fpriv;
243 
244 	return 0;
245 }
246 
/*
 * Close a context's hardware channel via its client and free the context.
 * The caller is responsible for unlinking it from the owning file's list.
 */
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}
252 
/*
 * drm_driver.lastclose implementation: restore the fbdev console mode once
 * the last userspace client has closed the device (no-op without fbdev).
 */
static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}
261 
/*
 * Resolve a GEM handle from @file into the host1x buffer object embedded in
 * the tegra_bo, or NULL if the handle is invalid.
 *
 * NOTE(review): the reference taken by drm_gem_object_lookup() is dropped
 * immediately, so the returned pointer is only protected by the handle
 * still being alive in @file's handle table — verify callers cannot race
 * with a GEM_CLOSE on the same handle.
 */
static struct host1x_bo *
host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem)
		return NULL;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(gem);
	mutex_unlock(&drm->struct_mutex);

	bo = to_tegra_bo(gem);
	return &bo->base;
}
279 
/*
 * Copy a single relocation descriptor from userspace into @dest, resolving
 * the command-buffer and target GEM handles into host1x buffer objects.
 * Returns 0 on success, -EFAULT on a faulting get_user() or -ENOENT if a
 * handle does not resolve.
 */
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(drm, file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}
318 
/*
 * Build and submit a host1x job from the userspace DRM_TEGRA_SUBMIT
 * arguments: gather command buffers, copy relocations and wait-checks,
 * then pin and submit the job. On success args->fence carries the syncpoint
 * threshold userspace can wait on. Returns 0 or a negative errno.
 *
 * NOTE(review): cmdbuf.offset/cmdbuf.words come straight from userspace and
 * are not validated against the buffer object's size here — presumably the
 * firewall in host1x_job_pin() catches out-of-range gathers; confirm before
 * relying on it.
 */
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x_job *job;
	int err;

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	/* copy each command buffer descriptor and add it as a gather */
	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;
	}

	if (copy_from_user(job->waitchk, waitchks,
			   sizeof(*waitchks) * num_waitchks)) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	/* cap the hardware timeout at 10 seconds */
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}
419 
420 
421 #ifdef CONFIG_DRM_TEGRA_STAGING
/*
 * Convert the opaque u64 context cookie passed in by userspace back into a
 * kernel pointer. The value is untrusted: every caller must validate it
 * with tegra_drm_file_owns_context() before dereferencing.
 */
static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
{
	return (struct tegra_drm_context *)(uintptr_t)context;
}
426 
427 static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
428 					struct tegra_drm_context *context)
429 {
430 	struct tegra_drm_context *ctx;
431 
432 	list_for_each_entry(ctx, &file->contexts, list)
433 		if (ctx == context)
434 			return true;
435 
436 	return false;
437 }
438 
439 static int tegra_gem_create(struct drm_device *drm, void *data,
440 			    struct drm_file *file)
441 {
442 	struct drm_tegra_gem_create *args = data;
443 	struct tegra_bo *bo;
444 
445 	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
446 					 &args->handle);
447 	if (IS_ERR(bo))
448 		return PTR_ERR(bo);
449 
450 	return 0;
451 }
452 
/*
 * DRM_TEGRA_GEM_MMAP ioctl: return the fake mmap offset for a buffer object
 * so userspace can mmap() it through the DRM device node.
 *
 * NOTE(review): a bad handle yields -EINVAL here while the tiling/flags
 * ioctls below return -ENOENT; this asymmetry is part of the established
 * uapi, so it is documented rather than changed.
 */
static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	return 0;
}
472 
473 static int tegra_syncpt_read(struct drm_device *drm, void *data,
474 			     struct drm_file *file)
475 {
476 	struct host1x *host = dev_get_drvdata(drm->dev->parent);
477 	struct drm_tegra_syncpt_read *args = data;
478 	struct host1x_syncpt *sp;
479 
480 	sp = host1x_syncpt_get(host, args->id);
481 	if (!sp)
482 		return -EINVAL;
483 
484 	args->value = host1x_syncpt_read_min(sp);
485 	return 0;
486 }
487 
488 static int tegra_syncpt_incr(struct drm_device *drm, void *data,
489 			     struct drm_file *file)
490 {
491 	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
492 	struct drm_tegra_syncpt_incr *args = data;
493 	struct host1x_syncpt *sp;
494 
495 	sp = host1x_syncpt_get(host1x, args->id);
496 	if (!sp)
497 		return -EINVAL;
498 
499 	return host1x_syncpt_incr(sp);
500 }
501 
502 static int tegra_syncpt_wait(struct drm_device *drm, void *data,
503 			     struct drm_file *file)
504 {
505 	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
506 	struct drm_tegra_syncpt_wait *args = data;
507 	struct host1x_syncpt *sp;
508 
509 	sp = host1x_syncpt_get(host1x, args->id);
510 	if (!sp)
511 		return -EINVAL;
512 
513 	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
514 				  &args->value);
515 }
516 
/*
 * DRM_TEGRA_OPEN_CHANNEL ioctl: find the engine (client) matching
 * args->client, open a channel context on it, link the context into the
 * file's list and hand its kernel pointer back to userspace as an opaque
 * cookie in args->context. Returns -ENODEV if no client matches,
 * -ENOMEM on allocation failure, or the client's open_channel() error.
 */
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = client->ops->open_channel(client, context);
			if (err)
				break;

			list_add(&context->list, &fpriv->contexts);
			args->context = (uintptr_t)context;
			context->client = client;
			return 0;
		}

	/* no matching client, or open_channel() failed: free the context */
	kfree(context);
	return err;
}
546 
/*
 * DRM_TEGRA_CLOSE_CHANNEL ioctl: validate the userspace context cookie
 * against the file's context list, then unlink and free the context
 * (which also closes the hardware channel).
 */
static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;

	context = tegra_drm_get_context(args->context);

	/* reject cookies that do not name a context owned by this file */
	if (!tegra_drm_file_owns_context(fpriv, context))
		return -EINVAL;

	list_del(&context->list);
	tegra_drm_context_free(context);

	return 0;
}
564 
565 static int tegra_get_syncpt(struct drm_device *drm, void *data,
566 			    struct drm_file *file)
567 {
568 	struct tegra_drm_file *fpriv = file->driver_priv;
569 	struct drm_tegra_get_syncpt *args = data;
570 	struct tegra_drm_context *context;
571 	struct host1x_syncpt *syncpt;
572 
573 	context = tegra_drm_get_context(args->context);
574 
575 	if (!tegra_drm_file_owns_context(fpriv, context))
576 		return -ENODEV;
577 
578 	if (args->index >= context->client->base.num_syncpts)
579 		return -EINVAL;
580 
581 	syncpt = context->client->base.syncpts[args->index];
582 	args->id = host1x_syncpt_id(syncpt);
583 
584 	return 0;
585 }
586 
587 static int tegra_submit(struct drm_device *drm, void *data,
588 			struct drm_file *file)
589 {
590 	struct tegra_drm_file *fpriv = file->driver_priv;
591 	struct drm_tegra_submit *args = data;
592 	struct tegra_drm_context *context;
593 
594 	context = tegra_drm_get_context(args->context);
595 
596 	if (!tegra_drm_file_owns_context(fpriv, context))
597 		return -ENODEV;
598 
599 	return context->client->ops->submit(context, args, drm, file);
600 }
601 
602 static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
603 				 struct drm_file *file)
604 {
605 	struct tegra_drm_file *fpriv = file->driver_priv;
606 	struct drm_tegra_get_syncpt_base *args = data;
607 	struct tegra_drm_context *context;
608 	struct host1x_syncpt_base *base;
609 	struct host1x_syncpt *syncpt;
610 
611 	context = tegra_drm_get_context(args->context);
612 
613 	if (!tegra_drm_file_owns_context(fpriv, context))
614 		return -ENODEV;
615 
616 	if (args->syncpt >= context->client->base.num_syncpts)
617 		return -EINVAL;
618 
619 	syncpt = context->client->base.syncpts[args->syncpt];
620 
621 	base = host1x_syncpt_get_base(syncpt);
622 	if (!base)
623 		return -ENXIO;
624 
625 	args->id = host1x_syncpt_base_id(base);
626 
627 	return 0;
628 }
629 
630 static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
631 				struct drm_file *file)
632 {
633 	struct drm_tegra_gem_set_tiling *args = data;
634 	enum tegra_bo_tiling_mode mode;
635 	struct drm_gem_object *gem;
636 	unsigned long value = 0;
637 	struct tegra_bo *bo;
638 
639 	switch (args->mode) {
640 	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
641 		mode = TEGRA_BO_TILING_MODE_PITCH;
642 
643 		if (args->value != 0)
644 			return -EINVAL;
645 
646 		break;
647 
648 	case DRM_TEGRA_GEM_TILING_MODE_TILED:
649 		mode = TEGRA_BO_TILING_MODE_TILED;
650 
651 		if (args->value != 0)
652 			return -EINVAL;
653 
654 		break;
655 
656 	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
657 		mode = TEGRA_BO_TILING_MODE_BLOCK;
658 
659 		if (args->value > 5)
660 			return -EINVAL;
661 
662 		value = args->value;
663 		break;
664 
665 	default:
666 		return -EINVAL;
667 	}
668 
669 	gem = drm_gem_object_lookup(drm, file, args->handle);
670 	if (!gem)
671 		return -ENOENT;
672 
673 	bo = to_tegra_bo(gem);
674 
675 	bo->tiling.mode = mode;
676 	bo->tiling.value = value;
677 
678 	drm_gem_object_unreference(gem);
679 
680 	return 0;
681 }
682 
683 static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
684 				struct drm_file *file)
685 {
686 	struct drm_tegra_gem_get_tiling *args = data;
687 	struct drm_gem_object *gem;
688 	struct tegra_bo *bo;
689 	int err = 0;
690 
691 	gem = drm_gem_object_lookup(drm, file, args->handle);
692 	if (!gem)
693 		return -ENOENT;
694 
695 	bo = to_tegra_bo(gem);
696 
697 	switch (bo->tiling.mode) {
698 	case TEGRA_BO_TILING_MODE_PITCH:
699 		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
700 		args->value = 0;
701 		break;
702 
703 	case TEGRA_BO_TILING_MODE_TILED:
704 		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
705 		args->value = 0;
706 		break;
707 
708 	case TEGRA_BO_TILING_MODE_BLOCK:
709 		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
710 		args->value = bo->tiling.value;
711 		break;
712 
713 	default:
714 		err = -EINVAL;
715 		break;
716 	}
717 
718 	drm_gem_object_unreference(gem);
719 
720 	return err;
721 }
722 
723 static int tegra_gem_set_flags(struct drm_device *drm, void *data,
724 			       struct drm_file *file)
725 {
726 	struct drm_tegra_gem_set_flags *args = data;
727 	struct drm_gem_object *gem;
728 	struct tegra_bo *bo;
729 
730 	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
731 		return -EINVAL;
732 
733 	gem = drm_gem_object_lookup(drm, file, args->handle);
734 	if (!gem)
735 		return -ENOENT;
736 
737 	bo = to_tegra_bo(gem);
738 	bo->flags = 0;
739 
740 	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
741 		bo->flags |= TEGRA_BO_BOTTOM_UP;
742 
743 	drm_gem_object_unreference(gem);
744 
745 	return 0;
746 }
747 
748 static int tegra_gem_get_flags(struct drm_device *drm, void *data,
749 			       struct drm_file *file)
750 {
751 	struct drm_tegra_gem_get_flags *args = data;
752 	struct drm_gem_object *gem;
753 	struct tegra_bo *bo;
754 
755 	gem = drm_gem_object_lookup(drm, file, args->handle);
756 	if (!gem)
757 		return -ENOENT;
758 
759 	bo = to_tegra_bo(gem);
760 	args->flags = 0;
761 
762 	if (bo->flags & TEGRA_BO_BOTTOM_UP)
763 		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
764 
765 	drm_gem_object_unreference(gem);
766 
767 	return 0;
768 }
769 #endif
770 
/*
 * Driver-private ioctl table; all entries are staging-only and compiled in
 * only with CONFIG_DRM_TEGRA_STAGING.
 */
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
#endif
};
789 
/* File operations for /dev/dri/cardN; only ->mmap is driver-specific. */
static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
803 
804 static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
805 					     unsigned int pipe)
806 {
807 	struct drm_crtc *crtc;
808 
809 	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
810 		if (pipe == drm_crtc_index(crtc))
811 			return crtc;
812 	}
813 
814 	return NULL;
815 }
816 
817 static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
818 {
819 	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
820 	struct tegra_dc *dc = to_tegra_dc(crtc);
821 
822 	if (!crtc)
823 		return 0;
824 
825 	return tegra_dc_get_vblank_counter(dc);
826 }
827 
828 static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
829 {
830 	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
831 	struct tegra_dc *dc = to_tegra_dc(crtc);
832 
833 	if (!crtc)
834 		return -ENODEV;
835 
836 	tegra_dc_enable_vblank(dc);
837 
838 	return 0;
839 }
840 
/*
 * drm_driver.disable_vblank implementation: disable the vblank interrupt
 * on the display controller driving @pipe; silently ignores unknown pipes.
 * As in the enable path, the CRTC is checked before conversion.
 */
static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
{
	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);

	if (crtc)
		tegra_dc_disable_vblank(to_tegra_dc(crtc));
}
849 
/*
 * drm_driver.preclose implementation: cancel any page flips still pending
 * for this file on every CRTC, free all channel contexts the file opened,
 * then release the per-file state.
 */
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm_context *context, *tmp;
	struct drm_crtc *crtc;

	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
		tegra_dc_cancel_page_flip(crtc, file);

	/* _safe variant: tegra_drm_context_free() frees the list nodes */
	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
		tegra_drm_context_free(context);

	kfree(fpriv);
}
864 
865 #ifdef CONFIG_DEBUG_FS
/*
 * debugfs "framebuffers" file: dump id, user-visible size, depth, bpp and
 * refcount of every framebuffer, under the mode-config fb_lock.
 */
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height, fb->depth,
			   fb->bits_per_pixel,
			   atomic_read(&fb->refcount.refcount));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}
885 
/*
 * debugfs "iova" file: dump the drm_mm allocator that tracks the IOMMU
 * address space (only meaningful when an IOMMU domain was set up in load).
 */
static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;

	return drm_mm_dump_table(s, &tegra->mm);
}
894 
/* Table of debugfs entries registered per DRM minor. */
static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};
899 
/* drm_driver.debugfs_init: register the driver's debugfs files. */
static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
906 
/* drm_driver.debugfs_cleanup: remove the files added in tegra_debugfs_init(). */
static void tegra_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list), minor);
}
913 
/*
 * Top-level DRM driver description: modeset + GEM + PRIME capable, with
 * driver-managed vblank and PRIME import/export via the tegra GEM helpers.
 */
static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.preclose = tegra_drm_preclose,
	.lastclose = tegra_drm_lastclose,

	.get_vblank_counter = tegra_drm_get_vblank_counter,
	.enable_vblank = tegra_drm_enable_vblank,
	.disable_vblank = tegra_drm_disable_vblank,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
	.debugfs_cleanup = tegra_debugfs_cleanup,
#endif

	.gem_free_object = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,
	.dumb_map_offset = tegra_bo_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
954 
/*
 * Add a hardware engine (2D/3D/...) to the list userspace can open channels
 * on via DRM_TEGRA_OPEN_CHANNEL. Always succeeds; returns 0.
 */
int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
964 
/*
 * Remove a hardware engine from the client list; list_del_init() leaves
 * client->list safely reusable. Always succeeds; returns 0.
 */
int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
974 
975 static int host1x_drm_probe(struct host1x_device *dev)
976 {
977 	struct drm_driver *driver = &tegra_drm_driver;
978 	struct drm_device *drm;
979 	int err;
980 
981 	drm = drm_dev_alloc(driver, &dev->dev);
982 	if (!drm)
983 		return -ENOMEM;
984 
985 	drm_dev_set_unique(drm, dev_name(&dev->dev));
986 	dev_set_drvdata(&dev->dev, drm);
987 
988 	err = drm_dev_register(drm, 0);
989 	if (err < 0)
990 		goto unref;
991 
992 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
993 		 driver->major, driver->minor, driver->patchlevel,
994 		 driver->date, drm->primary->index);
995 
996 	return 0;
997 
998 unref:
999 	drm_dev_unref(drm);
1000 	return err;
1001 }
1002 
/*
 * host1x bus ->remove: unregister the DRM device and drop the final
 * reference (which triggers tegra_drm_unload() via the DRM core).
 */
static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}
1012 
1013 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend: stop output polling while the system is down. */
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(drm);

	return 0;
}
1022 
/* System-sleep resume: re-enable output polling disabled in suspend. */
static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(drm);

	return 0;
}
1031 #endif
1032 
/* System sleep PM callbacks (no runtime PM handling here). */
static const struct dev_pm_ops host1x_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
};
1036 
/*
 * Device-tree compatibles of all subdevices that together form the logical
 * host1x DRM device; the host1x bus waits for all present ones to bind
 * before probing this driver.
 */
static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ /* sentinel */ }
};
1054 
/* host1x bus driver tying probe/remove/PM to the subdevice list above. */
static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};
1064 
/*
 * Module init: register the host1x driver and then every subdevice
 * platform driver. The error labels unwind in exact reverse registration
 * order; each label undoes the step *before* the one that failed.
 */
static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_driver_register(&tegra_dc_driver);
	if (err < 0)
		goto unregister_host1x;

	err = platform_driver_register(&tegra_dsi_driver);
	if (err < 0)
		goto unregister_dc;

	err = platform_driver_register(&tegra_sor_driver);
	if (err < 0)
		goto unregister_dsi;

	err = platform_driver_register(&tegra_hdmi_driver);
	if (err < 0)
		goto unregister_sor;

	err = platform_driver_register(&tegra_dpaux_driver);
	if (err < 0)
		goto unregister_hdmi;

	err = platform_driver_register(&tegra_gr2d_driver);
	if (err < 0)
		goto unregister_dpaux;

	err = platform_driver_register(&tegra_gr3d_driver);
	if (err < 0)
		goto unregister_gr2d;

	return 0;

unregister_gr2d:
	platform_driver_unregister(&tegra_gr2d_driver);
unregister_dpaux:
	platform_driver_unregister(&tegra_dpaux_driver);
unregister_hdmi:
	platform_driver_unregister(&tegra_hdmi_driver);
unregister_sor:
	platform_driver_unregister(&tegra_sor_driver);
unregister_dsi:
	platform_driver_unregister(&tegra_dsi_driver);
unregister_dc:
	platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);
1120 
/* Module exit: unregister everything in reverse order of registration. */
static void __exit host1x_drm_exit(void)
{
	platform_driver_unregister(&tegra_gr3d_driver);
	platform_driver_unregister(&tegra_gr2d_driver);
	platform_driver_unregister(&tegra_dpaux_driver);
	platform_driver_unregister(&tegra_hdmi_driver);
	platform_driver_unregister(&tegra_sor_driver);
	platform_driver_unregister(&tegra_dsi_driver);
	platform_driver_unregister(&tegra_dc_driver);
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);
1133 
/* Module metadata. */
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");
1137