/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        ret = regulator_enable(tdev->vdd);
        if (ret)
                goto err_power;

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        /* Hold the GPU in reset while the power-rail clamp is removed... */
        reset_control_assert(tdev->rst);
        udelay(10);

        ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
        if (ret)
                goto err_clamp;
        udelay(10);

        /* ...then release reset so the GPU comes up in a known state. */
        reset_control_deassert(tdev->rst);
        udelay(10);

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        clk_disable_unprepare(tdev->clk);
err_clk:
        regulator_disable(tdev->vdd);
err_power:
        return ret;
}

static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        reset_control_assert(tdev->rst);
        udelay(10);

        clk_disable_unprepare(tdev->clk_pwr);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        return regulator_disable(tdev->vdd);
}
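/*
 * On Tegra, GPU memory accesses may go through an IOMMU on the platform
 * bus.  When one is present and the chip-specific code has declared an
 * iommu_bit, allocate and attach a domain, pick a page shift no larger
 * than the CPU's PAGE_SHIFT, and set up an nvkm_mm allocator covering
 * the (1 << iommu_bit) byte IOVA space.  Failure here is not fatal: the
 * domain is left NULL and the driver falls back to untranslated accesses.
 */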
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * than or equal to the system's PAGE_SIZE, with a preference
                 * for both being equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
        iommu_domain_free(tdev->iommu.domain);

error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}

static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}

static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}

static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_mc *mc = tdev->device.mc;
        bool handled = false;
        if (likely(mc)) {
                nvkm_mc_intr_unarm(mc);
                nvkm_mc_intr(mc, &handled);
                nvkm_mc_intr_rearm(mc);
        }
        return handled ? IRQ_HANDLED : IRQ_NONE;
}
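/*
 * The GPU's "stall" interrupt is requested at init time and released at
 * fini time; tdev->irq doubles as the installed flag, with zero meaning
 * no handler is currently registered.
 */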
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}

static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}

static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}

static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};

int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;

        tdev->func = func;
        tdev->pdev = pdev;

        tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(tdev->vdd)) {
                ret = PTR_ERR(tdev->vdd);
                goto free;
        }

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst)) {
                ret = PTR_ERR(tdev->rst);
                goto free;
        }

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk)) {
                ret = PTR_ERR(tdev->clk);
                goto free;
        }

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
                goto free;
        }

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                goto remove;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
                goto powerdown;

        /* Only publish the device once construction has fully succeeded. */
        *pdevice = &tdev->device;
        return 0;

powerdown:
        nvkm_device_tegra_power_down(tdev);
remove:
        nvkm_device_tegra_remove_iommu(tdev);
free:
        kfree(tdev);
        return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif