/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	if (tdev->vdd) {
		ret = regulator_enable(tdev->vdd);
		if (ret)
			goto err_power;
	}

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	ret = clk_prepare_enable(tdev->clk_ref);
	if (ret)
		goto err_clk_ref;
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	if (!tdev->pdev->dev.pm_domain) {
		reset_control_assert(tdev->rst);
		udelay(10);

		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
		if (ret)
			goto err_clamp;
		udelay(10);

		reset_control_deassert(tdev->rst);
		udelay(10);
	}

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	if (tdev->vdd)
		regulator_disable(tdev->vdd);
err_power:
	return ret;
}

static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	clk_disable_unprepare(tdev->clk_pwr);
	clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}

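/*
 * Undo any IOMMU mapping the ARM DMA API may have attached to the device,
 * then, if the SoC exposes a GPU-usable IOMMU aperture (func->iommu_bit),
 * allocate and attach a domain of our own and set up an address allocator
 * covering that aperture.
 */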
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (device_iommu_mapped(dev)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * for the case where both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

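/*
 * Glue between the generic nvkm_device and the underlying platform device:
 * resource lookup, the "stall" interrupt handler, and the function table
 * passed to nvkm_device_ctor() below.
 */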
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}

static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};

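/*
 * Probe-time constructor: look up the regulator, reset line and clocks
 * described in the device tree, configure the DMA mask and IOMMU, power
 * the GPU up, and finally hand the device over to nvkm_device_ctor().
 */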
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	unsigned long rate;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	rate = clk_get_rate(tdev->clk);
	if (rate == 0) {
		ret = clk_set_rate(tdev->clk, ULONG_MAX);
		if (ret < 0)
			goto free;

		rate = clk_get_rate(tdev->clk);

		dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
	}

	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	/* clk_ref stays NULL when not required; IS_ERR(NULL) is false. */
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif