// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

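/**
 * panfrost_ioctl_get_param() - Handler for DRM_IOCTL_PANFROST_GET_PARAM.
 * @ddev: DRM device
 * @data: IOCTL args (struct drm_panfrost_get_param)
 * @file: DRM file for this fd
 *
 * Reports the requested GPU feature register or property from the values
 * cached in pfdev->features.
 */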
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;

	if (param->pad != 0)
		return -EINVAL;

#define PANFROST_FEATURE(name, member)			\
	case DRM_PANFROST_PARAM_ ## name:		\
		param->value = pfdev->features.member;	\
		break
#define PANFROST_FEATURE_ARRAY(name, member, max)			\
	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
	     DRM_PANFROST_PARAM_ ## name ## max:			\
		param->value = pfdev->features.member[param->param -	\
					DRM_PANFROST_PARAM_ ## name ## 0]; \
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				 thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				 thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
	default:
		return -EINVAL;
	}

	return 0;
}

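/**
 * panfrost_ioctl_create_bo() - Handler for DRM_IOCTL_PANFROST_CREATE_BO.
 * @dev: DRM device
 * @data: IOCTL args (struct drm_panfrost_create_bo)
 * @file: DRM file for this fd
 *
 * Creates a GEM object of the requested size, returns a handle to it and
 * reports the GPU virtual address of its mapping for this file in
 * args->offset.
 */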
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;
	int ret;

	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
		return -EINVAL;

	/* Heaps should never be executable */
	if ((args->flags & PANFROST_BO_HEAP) &&
	    !(args->flags & PANFROST_BO_NOEXEC))
		return -EINVAL;

	bo = panfrost_gem_create(dev, args->size, args->flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (ret)
		goto out;

	mapping = panfrost_gem_mapping_get(bo, priv);
	if (mapping) {
		args->offset = mapping->mmnode.start << PAGE_SHIFT;
		panfrost_gem_mapping_put(mapping);
	} else {
		/* This can only happen if the handle from
		 * drm_gem_handle_create() has already been guessed and freed
		 * by user space
		 */
		ret = -EINVAL;
	}

out:
	drm_gem_object_put(&bo->base.base);
	return ret;
}

/**
 * panfrost_lookup_bos() - Sets up job->bos[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	if (!job->bo_count)
		return 0;

	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			ret = -EINVAL;
			break;
		}

		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up the scheduler dependencies of the job
 * from the sync objects referenced by the submit args.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i, in_fence_count;

	in_fence_count = args->in_sync_count;

	if (!in_fence_count)
		return 0;

	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	for (i = 0; i < in_fence_count; i++) {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
							   handles[i], 0);
		if (ret)
			goto fail;
	}

fail:
	kvfree(handles);
	return ret;
}

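/**
 * panfrost_ioctl_submit() - Handler for DRM_IOCTL_PANFROST_SUBMIT.
 * @dev: DRM device
 * @data: IOCTL args (struct drm_panfrost_submit)
 * @file: DRM file for this fd
 *
 * Builds a panfrost_job from the submit args, resolves its input syncobjs
 * and BOs, pushes it to the scheduler and, if an output syncobj was given,
 * attaches the render-done fence to it.
 */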
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *file_priv = file->driver_priv;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_job *job;
	int ret = 0, slot;

	if (!args->jc)
		return -EINVAL;

	if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file, args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto out_put_syncout;
	}

	kref_init(&job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->mmu = file_priv->mmu;

	slot = panfrost_job_get_slot(job);

	ret = drm_sched_job_init(&job->base,
				 &file_priv->sched_entity[slot],
				 NULL);
	if (ret)
		goto out_put_job;

	ret = panfrost_copy_in_sync(dev, file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_lookup_bos(dev, file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto out_cleanup_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(sync_out, job->render_done_fence);

out_cleanup_job:
	if (ret)
		drm_sched_job_cleanup(&job->base);
out_put_job:
	panfrost_job_put(job);
out_put_syncout:
	if (sync_out)
		drm_syncobj_put(sync_out);

	return ret;
}

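/**
 * panfrost_ioctl_wait_bo() - Handler for DRM_IOCTL_PANFROST_WAIT_BO.
 * @dev: DRM device
 * @data: IOCTL args (struct drm_panfrost_wait_bo)
 * @file_priv: DRM file for this fd
 *
 * Waits on the BO's reservation object until it is idle or the timeout
 * expires. Returns -ETIMEDOUT when the timeout expires, or -EBUSY when a
 * zero timeout was passed and the BO is still busy.
 */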
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	long ret;
	struct drm_panfrost_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	if (args->pad)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj)
		return -ENOENT;

	ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
				    true, timeout);
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	drm_gem_object_put(gem_obj);

	return ret;
}

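/**
 * panfrost_ioctl_mmap_bo() - Handler for DRM_IOCTL_PANFROST_MMAP_BO.
 * @dev: DRM device
 * @data: IOCTL args (struct drm_panfrost_mmap_bo)
 * @file_priv: DRM file for this fd
 *
 * Creates the fake mmap offset for a BO so userspace can map it through
 * mmap() on the DRM fd. Heap BOs are rejected because their pages are not
 * pinned.
 */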
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_panfrost_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	/* Don't allow mmapping of heap objects as pages are not pinned. */
	if (to_panfrost_bo(gem_obj)->is_heap) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
	drm_gem_object_put(gem_obj);
	return ret;
}

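/**
 * panfrost_ioctl_get_bo_offset() - Handler for DRM_IOCTL_PANFROST_GET_BO_OFFSET.
 * @dev: DRM device
 * @data: IOCTL args (struct drm_panfrost_get_bo_offset)
 * @file_priv: DRM file for this fd
 *
 * Reports the GPU virtual address at which the BO is mapped for this file.
 */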
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_panfrost_bo(gem_obj);

	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put(gem_obj);

	if (!mapping)
		return -EINVAL;

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
	return 0;
}

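/**
 * panfrost_ioctl_madvise() - Handler for DRM_IOCTL_PANFROST_MADVISE.
 * @dev: DRM device
 * @data: IOCTL args (struct drm_panfrost_madvise)
 * @file_priv: DRM file for this fd
 *
 * Marks a BO as needed (WILLNEED) or purgeable (DONTNEED) and moves it on
 * or off the shrinker list accordingly. A BO can only be marked purgeable
 * when the calling FD is its sole user.
 */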
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
	if (ret)
		goto out_put_object;

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);

	if (args->retained) {
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_move_tail(&bo->base.madv_list,
				       &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);
	dma_resv_unlock(bo->base.base.resv);
out_put_object:
	drm_gem_object_put(gem_obj);
	return ret;
}

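/*
 * Gate for ioctls that are not considered stable yet: they are only allowed
 * when the unstable_ioctls module parameter is set.
 */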
int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;

	return 0;
}

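/*
 * Per-fd open: allocate the file private data, give the file its own MMU
 * context and set up its job scheduling state.
 */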
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *panfrost_priv;

	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
	if (!panfrost_priv)
		return -ENOMEM;

	panfrost_priv->pfdev = pfdev;
	file->driver_priv = panfrost_priv;

	panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
	if (IS_ERR(panfrost_priv->mmu)) {
		ret = PTR_ERR(panfrost_priv->mmu);
		goto err_free;
	}

	ret = panfrost_job_open(panfrost_priv);
	if (ret)
		goto err_job;

	return 0;

err_job:
	panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
	kfree(panfrost_priv);
	return ret;
}

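/*
 * Per-fd teardown: release any perfcnt state tied to this file, close the
 * per-file job state and drop the per-file MMU context.
 */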
static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct panfrost_file_priv *panfrost_priv = file->driver_priv;

	panfrost_perfcnt_close(file);
	panfrost_job_close(panfrost_priv);

	panfrost_mmu_ctx_put(panfrost_priv->mmu);
	kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 * - 1.2 - adds AFBC_FEATURES query
 */
static const struct drm_driver panfrost_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open = panfrost_open,
	.postclose = panfrost_postclose,
	.ioctls = panfrost_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops = &panfrost_drm_driver_fops,
	.name = "panfrost",
	.desc = "panfrost DRM",
	.date = "20180908",
	.major = 1,
	.minor = 2,

	.gem_create_object = panfrost_gem_create_object,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
};

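/*
 * Platform driver probe: allocate the panfrost and DRM devices, initialize
 * the GPU, enable runtime PM with autosuspend and register the DRM device.
 */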
static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->pdev = pdev;
	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, pfdev);

	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	pm_runtime_set_active(pfdev->dev);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_enable(pfdev->dev);
	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out1;

	panfrost_gem_shrinker_init(ddev);

	return 0;

err_out1:
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);
err_out0:
	drm_dev_put(ddev);
	return err;
}

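/*
 * Platform driver remove: unregister the DRM device, then shut the GPU down
 * and leave runtime PM disabled and suspended.
 */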
static void panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
	struct drm_device *ddev = pfdev->ddev;

	drm_dev_unregister(ddev);
	panfrost_gem_shrinker_cleanup(ddev);

	pm_runtime_get_sync(pfdev->dev);
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);

	drm_dev_put(ddev);
}

/*
 * The OPP core wants the supply names to be NULL terminated, but we need the
 * correct num_supplies value for regulator core. Hence, we NULL terminate here
 * and then initialize num_supplies with ARRAY_SIZE - 1.
 */
static const char * const default_supplies[] = { "mali", NULL };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies) - 1,
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

/*
 * The old data with two power supplies for MT8183 is here only to
 * keep retro-compatibility with older devicetrees, as DVFS will
 * not work with this one.
 *
 * On new devicetrees please use the _b variant with a single and
 * coupled regulators instead.
 */
static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
	.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1,
	.supply_names = mediatek_mt8183_supplies,
	.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
	.pm_domain_names = mediatek_mt8183_pm_domains,
};

static const char * const mediatek_mt8183_b_supplies[] = { "mali", NULL };
static const struct panfrost_compatible mediatek_mt8183_b_data = {
	.num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
	.supply_names = mediatek_mt8183_b_supplies,
	.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
	.pm_domain_names = mediatek_mt8183_pm_domains,
};

static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
static const struct panfrost_compatible mediatek_mt8186_data = {
	.num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1,
	.supply_names = mediatek_mt8183_b_supplies,
	.num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
	.pm_domain_names = mediatek_mt8186_pm_domains,
};

static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2",
							   "core3", "core4" };
static const struct panfrost_compatible mediatek_mt8192_data = {
	.num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1,
	.supply_names = mediatek_mt8192_supplies,
	.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
	.pm_domain_names = mediatek_mt8192_pm_domains,
};

static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{ .compatible = "arm,mali-valhall-jm", .data = &default_data, },
	{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
	{ .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data },
	{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
	{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove_new	= panfrost_remove,
	.driver		= {
		.name	= "panfrost",
		.pm	= pm_ptr(&panfrost_pm_ops),
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_SOFTDEP("pre: governor_simpleondemand");