// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"

static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
        struct drm_panfrost_get_param *param = data;
        struct panfrost_device *pfdev = ddev->dev_private;

        if (param->pad != 0)
                return -EINVAL;

        switch (param->param) {
        case DRM_PANFROST_PARAM_GPU_PROD_ID:
                param->value = pfdev->features.id;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
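
/*
 * Usage sketch (not part of the driver): a userspace client would query the
 * GPU product ID roughly as below, assuming the uapi names exported by
 * <drm/panfrost_drm.h> (DRM_IOCTL_PANFROST_GET_PARAM and the .param/.pad/
 * .value layout of struct drm_panfrost_get_param consumed above):
 *
 *      struct drm_panfrost_get_param gp = {
 *              .param = DRM_PANFROST_PARAM_GPU_PROD_ID,
 *      };
 *      if (drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &gp) == 0)
 *              printf("GPU prod id: 0x%llx\n", (unsigned long long)gp.value);
 */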

static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        int ret;
        struct drm_gem_shmem_object *shmem;
        struct drm_panfrost_create_bo *args = data;

        if (!args->size || args->flags || args->pad)
                return -EINVAL;

        shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
                                                 &args->handle);
        if (IS_ERR(shmem))
                return PTR_ERR(shmem);

        ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
        if (ret)
                goto err_free;

        args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;

        return 0;

err_free:
        drm_gem_object_put_unlocked(&shmem->base);
        return ret;
}

/**
 * panfrost_lookup_bos() - Sets up job->bos[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
                    struct drm_file *file_priv,
                    struct drm_panfrost_submit *args,
                    struct panfrost_job *job)
{
        job->bo_count = args->bo_handle_count;

        if (!job->bo_count)
                return 0;

        job->implicit_fences = kvmalloc_array(job->bo_count,
                                              sizeof(struct dma_fence *),
                                              GFP_KERNEL | __GFP_ZERO);
        if (!job->implicit_fences)
                return -ENOMEM;

        return drm_gem_objects_lookup(file_priv,
                                      (void __user *)(uintptr_t)args->bo_handles,
                                      job->bo_count, &job->bos);
}

/**
 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
                      struct drm_file *file_priv,
                      struct drm_panfrost_submit *args,
                      struct panfrost_job *job)
{
        u32 *handles;
        int ret = 0;
        int i;

        job->in_fence_count = args->in_sync_count;

        if (!job->in_fence_count)
                return 0;

        job->in_fences = kvmalloc_array(job->in_fence_count,
                                        sizeof(struct dma_fence *),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!job->in_fences) {
                DRM_DEBUG("Failed to allocate job in fences\n");
                return -ENOMEM;
        }

        handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
                goto fail;
        }

        if (copy_from_user(handles,
                           (void __user *)(uintptr_t)args->in_syncs,
                           job->in_fence_count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in syncobj handles\n");
                goto fail;
        }

        for (i = 0; i < job->in_fence_count; i++) {
                ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
                                             &job->in_fences[i]);
                if (ret == -EINVAL)
                        goto fail;
        }

fail:
        kvfree(handles);
        return ret;
}

static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_panfrost_submit *args = data;
        struct drm_syncobj *sync_out = NULL;
        struct panfrost_job *job;
        int ret = 0;

        if (!args->jc)
                return -EINVAL;

        if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
                return -EINVAL;

        if (args->out_sync > 0) {
                sync_out = drm_syncobj_find(file, args->out_sync);
                if (!sync_out)
                        return -ENODEV;
        }

        job = kzalloc(sizeof(*job), GFP_KERNEL);
        if (!job) {
                ret = -ENOMEM;
                goto fail_out_sync;
        }

        kref_init(&job->refcount);

        job->pfdev = pfdev;
        job->jc = args->jc;
        job->requirements = args->requirements;
        job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
        job->file_priv = file->driver_priv;

        ret = panfrost_copy_in_sync(dev, file, args, job);
        if (ret)
                goto fail_job;

        ret = panfrost_lookup_bos(dev, file, args, job);
        if (ret)
                goto fail_job;

        ret = panfrost_job_push(job);
        if (ret)
                goto fail_job;

        /* Update the return sync object for the job */
        if (sync_out)
                drm_syncobj_replace_fence(sync_out, job->render_done_fence);

fail_job:
        panfrost_job_put(job);
fail_out_sync:
        if (sync_out)
                drm_syncobj_put(sync_out);

        return ret;
}
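
/*
 * Usage sketch (not part of the driver): a minimal userspace submission,
 * assuming the uapi names exported by <drm/panfrost_drm.h>
 * (DRM_IOCTL_PANFROST_SUBMIT and the drm_panfrost_submit fields consumed
 * above). bo_handles points at an array of GEM handles, in_syncs at an array
 * of syncobj handles to wait on, and out_sync optionally names a syncobj
 * that will receive the render-done fence:
 *
 *      uint32_t bos[] = { fb_handle };
 *      struct drm_panfrost_submit submit = {
 *              .jc = job_chain_gpu_va,
 *              .bo_handles = (uintptr_t)bos,
 *              .bo_handle_count = 1,
 *              .out_sync = out_syncobj_handle,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
 */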

static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        long ret;
        struct drm_panfrost_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

        if (args->pad)
                return -EINVAL;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj)
                return -ENOENT;

        ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
                                                  true, timeout);
        if (!ret)
                ret = timeout ? -ETIMEDOUT : -EBUSY;

        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_panfrost_mmap_bo *args = data;
        struct drm_gem_object *gem_obj;
        int ret;

        if (args->flags != 0) {
                DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
                return -EINVAL;
        }

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }

        ret = drm_gem_create_mmap_offset(gem_obj);
        if (ret == 0)
                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
        drm_gem_object_put_unlocked(gem_obj);

        return ret;
}

static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                                        struct drm_file *file_priv)
{
        struct drm_panfrost_get_bo_offset *args = data;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;

        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
                return -ENOENT;
        }
        bo = to_panfrost_bo(gem_obj);

        args->offset = bo->node.start << PAGE_SHIFT;

        drm_gem_object_put_unlocked(gem_obj);
        return 0;
}

static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_file_priv *panfrost_priv;

        panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
        if (!panfrost_priv)
                return -ENOMEM;

        panfrost_priv->pfdev = pfdev;
        file->driver_priv = panfrost_priv;

        return panfrost_job_open(panfrost_priv);
}

static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct panfrost_file_priv *panfrost_priv = file->driver_priv;

        panfrost_job_close(panfrost_priv);

        kfree(panfrost_priv);
}
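
/*
 * Usage sketch (not part of the driver): CPU access to a BO follows the usual
 * two-step GEM pattern, with MMAP_BO translating a handle into a fake offset
 * that is then passed to mmap() on the DRM fd. DRM_IOCTL_PANFROST_MMAP_BO is
 * the assumed uapi name from <drm/panfrost_drm.h>:
 *
 *      struct drm_panfrost_mmap_bo mmap_args = { .handle = handle };
 *      drmIoctl(fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_args);
 *      void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       fd, mmap_args.offset);
 */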

/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
 * address space. Note that render nodes would be able to submit jobs that
 * could access BOs from clients authenticated with the master node.
 */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
        DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

        PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH),
        PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
        PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
};
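
/*
 * Note on the flags above: DRM_RENDER_ALLOW exposes an ioctl on the render
 * node in addition to the primary node, while DRM_AUTH requires the caller to
 * be authenticated with the DRM master when coming in through the primary
 * node (it is not checked for render clients). SUBMIT therefore carries both
 * flags, per the shared-address-space caveat documented above.
 */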

DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);

static struct drm_driver panfrost_drm_driver = {
        .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_SYNCOBJ,
        .open = panfrost_open,
        .postclose = panfrost_postclose,
        .ioctls = panfrost_drm_driver_ioctls,
        .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
        .fops = &panfrost_drm_driver_fops,
        .name = "panfrost",
        .desc = "panfrost DRM",
        .date = "20180908",
        .major = 1,
        .minor = 0,

        .gem_create_object = panfrost_gem_create_object,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
        .gem_prime_mmap = drm_gem_prime_mmap,
};

static int panfrost_probe(struct platform_device *pdev)
{
        struct panfrost_device *pfdev;
        struct drm_device *ddev;
        int err;

        pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
        if (!pfdev)
                return -ENOMEM;

        pfdev->pdev = pdev;
        pfdev->dev = &pdev->dev;

        platform_set_drvdata(pdev, pfdev);

        /* Allocate and initialize the DRM device. */
        ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
        if (IS_ERR(ddev))
                return PTR_ERR(ddev);

        ddev->dev_private = pfdev;
        pfdev->ddev = ddev;

        spin_lock_init(&pfdev->mm_lock);

        /* 4G is enough for now; this can be extended to 48-bit later. */
        drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);

        pm_runtime_use_autosuspend(pfdev->dev);
        pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
        pm_runtime_enable(pfdev->dev);

        err = panfrost_device_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during GPU init\n");
                goto err_out0;
        }

        err = panfrost_devfreq_init(pfdev);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Fatal error during devfreq init\n");
                goto err_out1;
        }

        /*
         * Register the DRM device with the core and the connectors with
         * sysfs.
         */
        err = drm_dev_register(ddev, 0);
        if (err < 0)
                goto err_out1;

        return 0;

err_out1:
        panfrost_device_fini(pfdev);
err_out0:
        pm_runtime_disable(pfdev->dev);
        drm_dev_put(ddev);
        return err;
}

static int panfrost_remove(struct platform_device *pdev)
{
        struct panfrost_device *pfdev = platform_get_drvdata(pdev);
        struct drm_device *ddev = pfdev->ddev;

        drm_dev_unregister(ddev);
        pm_runtime_get_sync(pfdev->dev);
        pm_runtime_put_sync_autosuspend(pfdev->dev);
        pm_runtime_disable(pfdev->dev);
        panfrost_device_fini(pfdev);
        drm_dev_put(ddev);
        return 0;
}

static const struct of_device_id dt_match[] = {
        { .compatible = "arm,mali-t604" },
        { .compatible = "arm,mali-t624" },
        { .compatible = "arm,mali-t628" },
        { .compatible = "arm,mali-t720" },
        { .compatible = "arm,mali-t760" },
        { .compatible = "arm,mali-t820" },
        { .compatible = "arm,mali-t830" },
        { .compatible = "arm,mali-t860" },
        { .compatible = "arm,mali-t880" },
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops panfrost_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
        .probe = panfrost_probe,
        .remove = panfrost_remove,
        .driver = {
                .name = "panfrost",
                .pm = &panfrost_pm_ops,
                .of_match_table = dt_match,
        },
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");