// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
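
/*
 * Per-SoC capability tables: one host1x_info per hardware revision,
 * describing how many channels, syncpoints, mlocks and wait bases the
 * block provides, the offset of its sync register aperture and the DMA
 * mask it can address.
 */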

static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
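
/*
 * Program the stream ID table: for each client listed for this SoC, write
 * the offset and limit of the client's memory-controller stream ID
 * registers through the hypervisor aperture. Only chips with a hypervisor
 * register space (Tegra186 and later) carry a SID table.
 */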
static void host1x_setup_sid_table(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}
}

static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
		return syncpt_irq;
	}

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
			to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);
	}
#endif
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		goto skip_iommu;

	host->group = iommu_group_get(&pdev->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		u64 mask = dma_get_mask(host->dev);
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV) {
				iommu_domain_free(host->domain);
				host->domain = NULL;
				iova_cache_put();
				iommu_group_put(host->group);
				host->group = NULL;
				goto skip_iommu;
			}

			goto fail_free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & mask;
		end = geometry->aperture_end & mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;
	}

skip_iommu:
	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	if (host->info->has_hypervisor)
		host1x_setup_sid_table(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_free_channels:
	host1x_channel_list_free(&host->channel_list);
fail_detach_device:
	if (host->group && host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);
put_cache:
	if (host->group)
		iova_cache_put();
put_group:
	iommu_group_put(host->group);

	return err;
}
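
/*
 * Undo host1x_probe() in reverse order: unregister the device, then shut
 * down interrupts and syncpoints before asserting reset and gating the
 * clock. IOMMU state is only torn down if a domain was set up.
 */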
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);

	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
		iommu_domain_free(host->domain);
		iova_cache_put();
		iommu_group_put(host->group);
	}

	return 0;
}

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");