// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

/*
 * Per-SoC configuration, selected via the of_device_id match data.
 */
struct nvdec_config {
	/* path of the Falcon firmware blob to request */
	const char *firmware;
	/* version reported to the Tegra DRM core (used for UAPI queries) */
	unsigned int version;
	/* true if the TFBIF/THI stream-ID registers exist on this SoC */
	bool supports_sid;
};

/*
 * Driver state for one NVDEC instance: the embedded Falcon microcontroller
 * context plus the host1x/Tegra DRM client glue.
 */
struct nvdec {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;

	/* Platform configuration */
	const struct nvdec_config *config;
};

/* Map a tegra_drm_client back to its containing nvdec instance. */
static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}

/* MMIO write helper; offsets are relative to the NVDEC register aperture. */
static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}

/*
 * Boot the NVDEC Falcon microcontroller.
 *
 * If the SoC supports stream IDs and an IOMMU fwspec is attached, program
 * the TFBIF/THI stream-ID registers first so that DMA issued by the freshly
 * booted firmware is translated through the correct IOMMU context. Register
 * offsets are shared with the VIC engine (hence the VIC_* names from vic.h).
 *
 * Returns 0 on success or a negative error code if the Falcon fails to boot
 * or does not reach idle.
 */
static int nvdec_boot(struct nvdec *nvdec)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
#endif
	int err;

#ifdef CONFIG_IOMMU_API
	if (nvdec->config->supports_sid && spec) {
		u32 value;

		/* route Falcon and engine transactions to their stream IDs */
		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			/* lower 16 bits hold the stream ID proper */
			value = spec->ids[0] & 0xffff;

			nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
			nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
		}
	}
#endif

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	/* wait for the firmware to finish initialization before use */
	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}
90 static int nvdec_init(struct host1x_client *client) 91 { 92 struct tegra_drm_client *drm = host1x_to_drm_client(client); 93 struct drm_device *dev = dev_get_drvdata(client->host); 94 struct tegra_drm *tegra = dev->dev_private; 95 struct nvdec *nvdec = to_nvdec(drm); 96 int err; 97 98 err = host1x_client_iommu_attach(client); 99 if (err < 0 && err != -ENODEV) { 100 dev_err(nvdec->dev, "failed to attach to domain: %d\n", err); 101 return err; 102 } 103 104 nvdec->channel = host1x_channel_request(client); 105 if (!nvdec->channel) { 106 err = -ENOMEM; 107 goto detach; 108 } 109 110 client->syncpts[0] = host1x_syncpt_request(client, 0); 111 if (!client->syncpts[0]) { 112 err = -ENOMEM; 113 goto free_channel; 114 } 115 116 err = tegra_drm_register_client(tegra, drm); 117 if (err < 0) 118 goto free_syncpt; 119 120 /* 121 * Inherit the DMA parameters (such as maximum segment size) from the 122 * parent host1x device. 123 */ 124 client->dev->dma_parms = client->host->dma_parms; 125 126 return 0; 127 128 free_syncpt: 129 host1x_syncpt_put(client->syncpts[0]); 130 free_channel: 131 host1x_channel_put(nvdec->channel); 132 detach: 133 host1x_client_iommu_detach(client); 134 135 return err; 136 } 137 138 static int nvdec_exit(struct host1x_client *client) 139 { 140 struct tegra_drm_client *drm = host1x_to_drm_client(client); 141 struct drm_device *dev = dev_get_drvdata(client->host); 142 struct tegra_drm *tegra = dev->dev_private; 143 struct nvdec *nvdec = to_nvdec(drm); 144 int err; 145 146 /* avoid a dangling pointer just in case this disappears */ 147 client->dev->dma_parms = NULL; 148 149 err = tegra_drm_unregister_client(tegra, drm); 150 if (err < 0) 151 return err; 152 153 host1x_syncpt_put(client->syncpts[0]); 154 host1x_channel_put(nvdec->channel); 155 host1x_client_iommu_detach(client); 156 157 if (client->group) { 158 dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys, 159 nvdec->falcon.firmware.size, DMA_TO_DEVICE); 160 tegra_drm_free(tegra, 
nvdec->falcon.firmware.size, 161 nvdec->falcon.firmware.virt, 162 nvdec->falcon.firmware.iova); 163 } else { 164 dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size, 165 nvdec->falcon.firmware.virt, 166 nvdec->falcon.firmware.iova); 167 } 168 169 return 0; 170 } 171 172 static const struct host1x_client_ops nvdec_client_ops = { 173 .init = nvdec_init, 174 .exit = nvdec_exit, 175 }; 176 177 static int nvdec_load_firmware(struct nvdec *nvdec) 178 { 179 struct host1x_client *client = &nvdec->client.base; 180 struct tegra_drm *tegra = nvdec->client.drm; 181 dma_addr_t iova; 182 size_t size; 183 void *virt; 184 int err; 185 186 if (nvdec->falcon.firmware.virt) 187 return 0; 188 189 err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware); 190 if (err < 0) 191 return err; 192 193 size = nvdec->falcon.firmware.size; 194 195 if (!client->group) { 196 virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); 197 198 err = dma_mapping_error(nvdec->dev, iova); 199 if (err < 0) 200 return err; 201 } else { 202 virt = tegra_drm_alloc(tegra, size, &iova); 203 } 204 205 nvdec->falcon.firmware.virt = virt; 206 nvdec->falcon.firmware.iova = iova; 207 208 err = falcon_load_firmware(&nvdec->falcon); 209 if (err < 0) 210 goto cleanup; 211 212 /* 213 * In this case we have received an IOVA from the shared domain, so we 214 * need to make sure to get the physical address so that the DMA API 215 * knows what memory pages to flush the cache for. 
216 */ 217 if (client->group) { 218 dma_addr_t phys; 219 220 phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE); 221 222 err = dma_mapping_error(nvdec->dev, phys); 223 if (err < 0) 224 goto cleanup; 225 226 nvdec->falcon.firmware.phys = phys; 227 } 228 229 return 0; 230 231 cleanup: 232 if (!client->group) 233 dma_free_coherent(nvdec->dev, size, virt, iova); 234 else 235 tegra_drm_free(tegra, size, virt, iova); 236 237 return err; 238 } 239 240 241 static __maybe_unused int nvdec_runtime_resume(struct device *dev) 242 { 243 struct nvdec *nvdec = dev_get_drvdata(dev); 244 int err; 245 246 err = clk_prepare_enable(nvdec->clk); 247 if (err < 0) 248 return err; 249 250 usleep_range(10, 20); 251 252 err = nvdec_load_firmware(nvdec); 253 if (err < 0) 254 goto disable; 255 256 err = nvdec_boot(nvdec); 257 if (err < 0) 258 goto disable; 259 260 return 0; 261 262 disable: 263 clk_disable_unprepare(nvdec->clk); 264 return err; 265 } 266 267 static __maybe_unused int nvdec_runtime_suspend(struct device *dev) 268 { 269 struct nvdec *nvdec = dev_get_drvdata(dev); 270 271 clk_disable_unprepare(nvdec->clk); 272 273 return 0; 274 } 275 276 static int nvdec_open_channel(struct tegra_drm_client *client, 277 struct tegra_drm_context *context) 278 { 279 struct nvdec *nvdec = to_nvdec(client); 280 int err; 281 282 err = pm_runtime_get_sync(nvdec->dev); 283 if (err < 0) { 284 pm_runtime_put(nvdec->dev); 285 return err; 286 } 287 288 context->channel = host1x_channel_get(nvdec->channel); 289 if (!context->channel) { 290 pm_runtime_put(nvdec->dev); 291 return -ENOMEM; 292 } 293 294 return 0; 295 } 296 297 static void nvdec_close_channel(struct tegra_drm_context *context) 298 { 299 struct nvdec *nvdec = to_nvdec(context->client); 300 301 host1x_channel_put(context->channel); 302 pm_runtime_put(nvdec->dev); 303 } 304 305 static const struct tegra_drm_client_ops nvdec_ops = { 306 .open_channel = nvdec_open_channel, 307 .close_channel = nvdec_close_channel, 308 .submit = 
tegra_drm_submit, 309 }; 310 311 #define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin" 312 313 static const struct nvdec_config nvdec_t210_config = { 314 .firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE, 315 .version = 0x21, 316 .supports_sid = false, 317 }; 318 319 #define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin" 320 321 static const struct nvdec_config nvdec_t186_config = { 322 .firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE, 323 .version = 0x18, 324 .supports_sid = true, 325 }; 326 327 #define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin" 328 329 static const struct nvdec_config nvdec_t194_config = { 330 .firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE, 331 .version = 0x19, 332 .supports_sid = true, 333 }; 334 335 static const struct of_device_id tegra_nvdec_of_match[] = { 336 { .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config }, 337 { .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config }, 338 { .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config }, 339 { }, 340 }; 341 MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match); 342 343 static int nvdec_probe(struct platform_device *pdev) 344 { 345 struct device *dev = &pdev->dev; 346 struct host1x_syncpt **syncpts; 347 struct nvdec *nvdec; 348 u32 host_class; 349 int err; 350 351 /* inherit DMA mask from host1x parent */ 352 err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); 353 if (err < 0) { 354 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); 355 return err; 356 } 357 358 nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL); 359 if (!nvdec) 360 return -ENOMEM; 361 362 nvdec->config = of_device_get_match_data(dev); 363 364 syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL); 365 if (!syncpts) 366 return -ENOMEM; 367 368 nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); 369 if (IS_ERR(nvdec->regs)) 370 return PTR_ERR(nvdec->regs); 371 372 nvdec->clk = devm_clk_get(dev, NULL); 373 if 
(IS_ERR(nvdec->clk)) { 374 dev_err(&pdev->dev, "failed to get clock\n"); 375 return PTR_ERR(nvdec->clk); 376 } 377 378 err = clk_set_rate(nvdec->clk, ULONG_MAX); 379 if (err < 0) { 380 dev_err(&pdev->dev, "failed to set clock rate\n"); 381 return err; 382 } 383 384 err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class); 385 if (err < 0) 386 host_class = HOST1X_CLASS_NVDEC; 387 388 nvdec->falcon.dev = dev; 389 nvdec->falcon.regs = nvdec->regs; 390 391 err = falcon_init(&nvdec->falcon); 392 if (err < 0) 393 return err; 394 395 platform_set_drvdata(pdev, nvdec); 396 397 INIT_LIST_HEAD(&nvdec->client.base.list); 398 nvdec->client.base.ops = &nvdec_client_ops; 399 nvdec->client.base.dev = dev; 400 nvdec->client.base.class = host_class; 401 nvdec->client.base.syncpts = syncpts; 402 nvdec->client.base.num_syncpts = 1; 403 nvdec->dev = dev; 404 405 INIT_LIST_HEAD(&nvdec->client.list); 406 nvdec->client.version = nvdec->config->version; 407 nvdec->client.ops = &nvdec_ops; 408 409 err = host1x_client_register(&nvdec->client.base); 410 if (err < 0) { 411 dev_err(dev, "failed to register host1x client: %d\n", err); 412 goto exit_falcon; 413 } 414 415 pm_runtime_enable(&pdev->dev); 416 pm_runtime_set_autosuspend_delay(&pdev->dev, 500); 417 pm_runtime_use_autosuspend(&pdev->dev); 418 419 return 0; 420 421 exit_falcon: 422 falcon_exit(&nvdec->falcon); 423 424 return err; 425 } 426 427 static int nvdec_remove(struct platform_device *pdev) 428 { 429 struct nvdec *nvdec = platform_get_drvdata(pdev); 430 int err; 431 432 err = host1x_client_unregister(&nvdec->client.base); 433 if (err < 0) { 434 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 435 err); 436 return err; 437 } 438 439 if (pm_runtime_enabled(&pdev->dev)) 440 pm_runtime_disable(&pdev->dev); 441 else 442 nvdec_runtime_suspend(&pdev->dev); 443 444 falcon_exit(&nvdec->falcon); 445 446 return 0; 447 } 448 449 static const struct dev_pm_ops nvdec_pm_ops = { 450 
SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL) 451 }; 452 453 struct platform_driver tegra_nvdec_driver = { 454 .driver = { 455 .name = "tegra-nvdec", 456 .of_match_table = tegra_nvdec_of_match, 457 .pm = &nvdec_pm_ops 458 }, 459 .probe = nvdec_probe, 460 .remove = nvdec_remove, 461 }; 462 463 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 464 MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE); 465 #endif 466 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) 467 MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE); 468 #endif 469 #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) 470 MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE); 471 #endif 472