/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	if (tdev->vdd) {
		ret = regulator_enable(tdev->vdd);
		if (ret)
			goto err_power;
	}

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	if (!tdev->pdev->dev.pm_domain) {
		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
		if (ret)
			goto err_clamp;
		udelay(10);
	}

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	if (tdev->vdd)
		regulator_disable(tdev->vdd);
err_power:
	return ret;
}

static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}
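/*
 * If the SoC exposes an IOMMU to the GPU, allocate and attach an IOMMU
 * domain and set up an nvkm_mm allocator covering the range addressable
 * through func->iommu_bit. On any failure the device falls back to
 * non-IOMMU operation (iommu.domain stays NULL).
 */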
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * for both being equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}
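/*
 * Shared interrupt handler: disarm MC interrupts, dispatch any pending
 * ones to their subdevs, then rearm. Return IRQ_NONE when nothing was
 * handled so the core can account for spurious interrupts on the shared
 * line.
 */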
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
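/*
 * Constructor for a Tegra-attached GPU: acquires the GPU's regulator,
 * reset control and clocks, sets the DMA mask from the chip's IOMMU bit,
 * optionally sets up the IOMMU, powers the device up and finally creates
 * the nvkm_device instance.
 */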
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif