/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	if (tdev->vdd) {
		ret = regulator_enable(tdev->vdd);
		if (ret)
			goto err_power;
	}

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	if (ret)
		goto err_clamp;
	udelay(10);

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	if (tdev->vdd)
		regulator_disable(tdev->vdd);
err_power:
	return ret;
}

static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	reset_control_assert(tdev->rst);
	udelay(10);

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}
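
/*
 * IOMMU setup: when the SoC variant sets func->iommu_bit, the probe helper
 * below tries to put the GPU behind the platform IOMMU.  It selects the
 * largest IOMMU page size that does not exceed the system's PAGE_SIZE
 * (preferring an exact match) and sets up an nvkm_mm allocator spanning
 * the GPU-addressable range (1 << iommu_bit).  Any failure simply leaves
 * iommu.domain NULL and the device continues without IOMMU translation.
 */
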
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}

static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}

static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
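
/*
 * Interrupt and device init/fini: init() requests the platform device's
 * "stall" interrupt and routes it through nvkm_device_tegra_intr(), which
 * masks (unarm), dispatches, and re-enables (rearm) MC interrupts; fini()
 * releases the IRQ again, e.g. when tearing down or suspending the device.
 */
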
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}

static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}

static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
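
/*
 * Constructor: gathers the "vdd" regulator (only if the SoC variant requires
 * it), the "gpu" reset line and the "gpu"/"ref"/"pwr" clocks, widens the DMA
 * mask to the GPU-addressable range, optionally attaches the IOMMU, powers
 * the GPU up and finally creates the common nvkm_device.  Error paths unwind
 * in reverse order via the powerdown/remove/free labels.
 */
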
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 * This will be refined in nouveau_ttm_init, but we need to do it
	 * early for instmem to behave properly.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif
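
/*
 * Illustrative usage (a minimal sketch, not part of the original file): a
 * platform driver's probe path might invoke the constructor roughly like
 * this.  The names "func", "config" and "debug", and the argument values
 * detect=true, mmio=true and subdev_mask=~0ULL, are placeholders chosen for
 * the example rather than values taken from this file.
 *
 *	struct nvkm_device *device;
 *	int ret;
 *
 *	ret = nvkm_device_tegra_new(func, pdev, config, debug,
 *				    true, true, ~0ULL, &device);
 *	if (ret)
 *		return ret;
 */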