// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2022, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/mc.h>

#include "drm.h"
#include "falcon.h"
#include "riscv.h"
#include "vic.h"

#define NVDEC_FALCON_DEBUGINFO		0x1094
#define NVDEC_TFBIF_TRANSCFG		0x2c44

struct nvdec_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
	bool has_riscv;
	bool has_extra_clocks;
};

struct nvdec {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk_bulk_data clks[3];
	unsigned int num_clks;
	struct reset_control *reset;

	/* Platform configuration */
	const struct nvdec_config *config;

	/* RISC-V specific data */
	struct tegra_drm_riscv riscv;
	phys_addr_t carveout_base;
};

static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}

static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}

static int nvdec_boot_falcon(struct nvdec *nvdec)
{
	u32 stream_id;
	int err;

	if (nvdec->config->supports_sid && tegra_dev_iommu_get_stream_id(nvdec->dev, &stream_id)) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);

		nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID0);
		nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID1);
	}

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}
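
/*
 * On the RISC-V based NVDEC instances, boot progress is tracked through the
 * DEBUGINFO scratch register: the driver writes a magic value before kicking
 * off the core and the firmware clears it once it has come up, so boot is
 * considered complete when the register reads back as zero.
 */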
static int nvdec_wait_debuginfo(struct nvdec *nvdec, const char *phase)
{
	int err;
	u32 val;

	err = readl_poll_timeout(nvdec->regs + NVDEC_FALCON_DEBUGINFO, val, val == 0x0, 10, 100000);
	if (err) {
		dev_err(nvdec->dev, "failed to boot %s, debuginfo=0x%x\n", phase, val);
		return err;
	}

	return 0;
}

static int nvdec_boot_riscv(struct nvdec *nvdec)
{
	int err;

	err = reset_control_acquire(nvdec->reset);
	if (err)
		return err;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.bl_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute bootloader\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "bootloader");
	if (err)
		goto release_reset;

	err = reset_control_reset(nvdec->reset);
	if (err)
		goto release_reset;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.os_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute firmware\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "firmware");
	if (err)
		goto release_reset;

release_reset:
	reset_control_release(nvdec->reset);

	return err;
}

static int nvdec_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	nvdec->channel = host1x_channel_request(client);
	if (!nvdec->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(nvdec->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

static int nvdec_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(nvdec->channel);
	host1x_client_iommu_detach(client);

	nvdec->channel = NULL;

	if (client->group) {
		dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
				 nvdec->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvdec->falcon.firmware.size,
			       nvdec->falcon.firmware.virt,
			       nvdec->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
				  nvdec->falcon.firmware.virt,
				  nvdec->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops nvdec_client_ops = {
	.init = nvdec_init,
	.exit = nvdec_exit,
};
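
/*
 * The Falcon firmware image stays resident once loaded. If the client is
 * part of a shared IOMMU domain (client->group), the buffer is allocated
 * with tegra_drm_alloc() and additionally mapped with dma_map_single() so
 * the DMA API can flush the CPU caches; otherwise a DMA-coherent allocation
 * against the NVDEC device is used.
 */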
static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
{
	struct host1x_client *client = &nvdec->client.base;
	struct tegra_drm *tegra = nvdec->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (nvdec->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
	if (err < 0)
		return err;

	size = nvdec->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(nvdec->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
	}

	nvdec->falcon.firmware.virt = virt;
	nvdec->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&nvdec->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(nvdec->dev, phys);
		if (err < 0)
			goto cleanup;

		nvdec->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(nvdec->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}

static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);
	int err;

	err = clk_bulk_prepare_enable(nvdec->num_clks, nvdec->clks);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	if (nvdec->config->has_riscv) {
		err = nvdec_boot_riscv(nvdec);
		if (err < 0)
			goto disable;
	} else {
		err = nvdec_load_falcon_firmware(nvdec);
		if (err < 0)
			goto disable;

		err = nvdec_boot_falcon(nvdec);
		if (err < 0)
			goto disable;
	}

	return 0;

disable:
	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);
	return err;
}

static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);

	host1x_channel_stop(nvdec->channel);

	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);

	return 0;
}

static int nvdec_open_channel(struct tegra_drm_client *client,
			      struct tegra_drm_context *context)
{
	struct nvdec *nvdec = to_nvdec(client);

	context->channel = host1x_channel_get(nvdec->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void nvdec_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	*supported = true;

	return 0;
}

static const struct tegra_drm_client_ops nvdec_ops = {
	.open_channel = nvdec_open_channel,
	.close_channel = nvdec_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = nvdec_can_use_memory_ctx,
};
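
/*
 * Per-SoC configuration: Tegra210, Tegra186 and Tegra194 expose a Falcon
 * microcontroller whose firmware is loaded by the driver, while Tegra234
 * uses a RISC-V core that boots firmware from a memory carveout and
 * therefore needs no firmware file here.
 */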
#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"

static const struct nvdec_config nvdec_t210_config = {
	.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"

static const struct nvdec_config nvdec_t186_config = {
	.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"

static const struct nvdec_config nvdec_t194_config = {
	.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct nvdec_config nvdec_t234_config = {
	.version = 0x23,
	.supports_sid = true,
	.has_riscv = true,
	.has_extra_clocks = true,
};

static const struct of_device_id tegra_nvdec_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
	{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
	{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
	{ .compatible = "nvidia,tegra234-nvdec", .data = &nvdec_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);

static int nvdec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct nvdec *nvdec;
	u32 host_class;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
	if (!nvdec)
		return -ENOMEM;

	nvdec->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(nvdec->regs))
		return PTR_ERR(nvdec->regs);

	nvdec->clks[0].id = "nvdec";
	nvdec->num_clks = 1;

	if (nvdec->config->has_extra_clocks) {
		nvdec->num_clks = 3;
		nvdec->clks[1].id = "fuse";
		nvdec->clks[2].id = "tsec_pka";
	}

	err = devm_clk_bulk_get(dev, nvdec->num_clks, nvdec->clks);
	if (err) {
		dev_err(&pdev->dev, "failed to get clock(s)\n");
		return err;
	}

	err = clk_set_rate(nvdec->clks[0].clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
	if (err < 0)
		host_class = HOST1X_CLASS_NVDEC;

	if (nvdec->config->has_riscv) {
		struct tegra_mc *mc;

		mc = devm_tegra_memory_controller_get(dev);
		if (IS_ERR(mc)) {
			dev_err_probe(dev, PTR_ERR(mc),
				      "failed to get memory controller handle\n");
			return PTR_ERR(mc);
		}

		err = tegra_mc_get_carveout_info(mc, 1, &nvdec->carveout_base, NULL);
		if (err) {
			dev_err(dev, "failed to get carveout info: %d\n", err);
			return err;
		}

		nvdec->reset = devm_reset_control_get_exclusive_released(dev, "nvdec");
		if (IS_ERR(nvdec->reset)) {
			dev_err_probe(dev, PTR_ERR(nvdec->reset), "failed to get reset\n");
			return PTR_ERR(nvdec->reset);
		}

		nvdec->riscv.dev = dev;
		nvdec->riscv.regs = nvdec->regs;

		err = tegra_drm_riscv_read_descriptors(&nvdec->riscv);
		if (err < 0)
			return err;
	} else {
		nvdec->falcon.dev = dev;
		nvdec->falcon.regs = nvdec->regs;

		err = falcon_init(&nvdec->falcon);
		if (err < 0)
			return err;
	}

	platform_set_drvdata(pdev, nvdec);

	INIT_LIST_HEAD(&nvdec->client.base.list);
	nvdec->client.base.ops = &nvdec_client_ops;
	nvdec->client.base.dev = dev;
	nvdec->client.base.class = host_class;
	nvdec->client.base.syncpts = syncpts;
	nvdec->client.base.num_syncpts = 1;
	nvdec->dev = dev;

	INIT_LIST_HEAD(&nvdec->client.list);
	nvdec->client.version = nvdec->config->version;
	nvdec->client.ops = &nvdec_ops;

	err = host1x_client_register(&nvdec->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&nvdec->falcon);

	return err;
}

static void nvdec_remove(struct platform_device *pdev)
{
	struct nvdec *nvdec = platform_get_drvdata(pdev);

	host1x_client_unregister(&nvdec->client.base);

	falcon_exit(&nvdec->falcon);
}
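
/*
 * System sleep reuses the runtime PM callbacks: forcing runtime suspend on
 * suspend and runtime resume on resume powers the engine down and back up.
 */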
static const struct dev_pm_ops nvdec_pm_ops = {
	SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_nvdec_driver = {
	.driver = {
		.name = "tegra-nvdec",
		.of_match_table = tegra_nvdec_of_match,
		.pm = &nvdec_pm_ops
	},
	.probe = nvdec_probe,
	.remove_new = nvdec_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
#endif