/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "acpi.h"

#include <core/notify.h>
#include <core/option.h>

#include <subdev/bios.h>

static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);

struct nvkm_device *
nvkm_device_find(u64 name)
{
	struct nvkm_device *device, *match = NULL;
	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name) {
			match = device;
			break;
		}
	}
	mutex_unlock(&nv_devices_mutex);
	return match;
}

int
nvkm_device_list(u64 *name, int size)
{
	struct nvkm_device *device;
	int nr = 0;
	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (nr++ < size)
			name[nr - 1] = device->handle;
	}
	mutex_unlock(&nv_devices_mutex);
	return nr;
}

#include <core/parent.h>

struct nvkm_device *
nv_device(void *obj)
{
	struct nvkm_object *device = nv_object(obj);

	if (device->engine == NULL) {
		while (device && device->parent) {
			if (nv_mclass(device) == 0x0080) {
				struct {
					struct nvkm_parent base;
					struct nvkm_device *device;
				} *udevice = (void *)device;
				return udevice->device;
			}
			device = device->parent;
		}
	} else {
		device = &nv_object(obj)->engine->subdev.object;
		if (device && device->parent)
			device = device->parent;
	}
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
	BUG_ON(!device);
#endif
	return (void *)device;
}

static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
		       struct nvkm_notify *notify)
{
	if (!WARN_ON(size != 0)) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -EINVAL;
}

static const struct nvkm_event_func
nvkm_device_event_func = {
	.ctor = nvkm_device_event_ctor,
};
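
/* Teardown/suspend path: drop the use-count of every non-engine subdev in
 * reverse creation order, then notify ACPI.  If a decrement fails during
 * suspend, everything already stopped is brought back up so the device is
 * left in a usable state.
 */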
int
nvkm_device_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_object *subdev;
	int ret, i;

	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_dec(subdev, suspend);
				if (ret && suspend)
					goto fail;
			}
		}
	}

	ret = nvkm_acpi_fini(device, suspend);
	if (ret)
		i = 0; /* i is -1 here; avoid indexing subdev[-1] below */
fail:
	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret) {
					/* XXX */
				}
			}
		}
	}

	return ret;
}
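
/* Bring-up path: registers with ACPI, then walks the subdev/engine list in
 * a fixed order, constructing each object the chipset's identify routine
 * registered an oclass for.  Subdevs are only initialised (nvkm_object_inc)
 * once all of DEVINIT's dependencies exist; see the comment inside the loop
 * for why.
 */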
int
nvkm_device_init(struct nvkm_device *device)
{
	struct nvkm_object *subdev;
	int ret, i = 0, c;

	ret = nvkm_acpi_init(device);
	if (ret)
		goto fail;

	for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
#define _(s,m) case s: if (device->oclass[s] && !device->subdev[s]) {         \
		ret = nvkm_object_old(nv_object(device), NULL,                \
				      device->oclass[s], NULL, (s),           \
				      (struct nvkm_object **)&device->m);     \
		if (ret == -ENODEV) {                                         \
			device->oclass[s] = NULL;                             \
			continue;                                             \
		}                                                             \
		if (ret)                                                      \
			goto fail;                                            \
		device->subdev[s] = (struct nvkm_object *)device->m;          \
	} break
		switch (i) {
		_(NVDEV_SUBDEV_BAR    ,     bar);
		_(NVDEV_SUBDEV_VBIOS  ,    bios);
		_(NVDEV_SUBDEV_BUS    ,     bus);
		_(NVDEV_SUBDEV_CLK    ,     clk);
		_(NVDEV_SUBDEV_DEVINIT, devinit);
		_(NVDEV_SUBDEV_FB     ,      fb);
		_(NVDEV_SUBDEV_FUSE   ,    fuse);
		_(NVDEV_SUBDEV_GPIO   ,    gpio);
		_(NVDEV_SUBDEV_I2C    ,     i2c);
		_(NVDEV_SUBDEV_IBUS   ,    ibus);
		_(NVDEV_SUBDEV_INSTMEM,    imem);
		_(NVDEV_SUBDEV_LTC    ,     ltc);
		_(NVDEV_SUBDEV_MC     ,      mc);
		_(NVDEV_SUBDEV_MMU    ,     mmu);
		_(NVDEV_SUBDEV_MXM    ,     mxm);
		_(NVDEV_SUBDEV_PMU    ,     pmu);
		_(NVDEV_SUBDEV_THERM  ,   therm);
		_(NVDEV_SUBDEV_TIMER  ,   timer);
		_(NVDEV_SUBDEV_VOLT   ,    volt);
		_(NVDEV_ENGINE_BSP    ,     bsp);
		_(NVDEV_ENGINE_CE0    ,   ce[0]);
		_(NVDEV_ENGINE_CE1    ,   ce[1]);
		_(NVDEV_ENGINE_CE2    ,   ce[2]);
		_(NVDEV_ENGINE_CIPHER ,  cipher);
		_(NVDEV_ENGINE_DISP   ,    disp);
		_(NVDEV_ENGINE_DMAOBJ ,     dma);
		_(NVDEV_ENGINE_FIFO   ,    fifo);
		_(NVDEV_ENGINE_GR     ,      gr);
		_(NVDEV_ENGINE_IFB    ,     ifb);
		_(NVDEV_ENGINE_ME     ,      me);
		_(NVDEV_ENGINE_MPEG   ,    mpeg);
		_(NVDEV_ENGINE_MSENC  ,   msenc);
		_(NVDEV_ENGINE_MSPDEC ,  mspdec);
		_(NVDEV_ENGINE_MSPPP  ,   msppp);
		_(NVDEV_ENGINE_MSVLD  ,   msvld);
		_(NVDEV_ENGINE_PM     ,      pm);
		_(NVDEV_ENGINE_SEC    ,     sec);
		_(NVDEV_ENGINE_SW     ,      sw);
		_(NVDEV_ENGINE_VIC    ,     vic);
		_(NVDEV_ENGINE_VP     ,      vp);
		default:
			WARN_ON(1);
			continue;
		}
#undef _

		/* note: can't init *any* subdevs until devinit has been run
		 * due to not knowing exactly what the vbios init tables will
		 * mess with.  devinit also can't be run until all of its
		 * dependencies have been created.
		 *
		 * this code delays init of any subdev until all of devinit's
		 * dependencies have been created, and then initialises each
		 * subdev in turn as they're created.
		 */
		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
			struct nvkm_object *subdev = device->subdev[c++];
			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				if (ret)
					goto fail;
			} else
			if (subdev) {
				nvkm_subdev_reset(subdev);
			}
		}
	}

	ret = 0;
fail:
	for (--i; ret && i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
				nvkm_object_dec(subdev, false);
		}
	}

	if (ret)
		nvkm_acpi_fini(device, false);
	return ret;
}
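
/* Bus abstraction: the device may sit on PCI or on a platform bus (e.g.
 * Tegra), so resource and interrupt lookups dispatch on nv_device_is_pci().
 */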
resource_size_t
nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
{
	if (nv_device_is_pci(device)) {
		return pci_resource_start(device->pdev, bar);
	} else {
		struct resource *res;
		res = platform_get_resource(device->platformdev,
					    IORESOURCE_MEM, bar);
		if (!res)
			return 0;
		return res->start;
	}
}

resource_size_t
nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
{
	if (nv_device_is_pci(device)) {
		return pci_resource_len(device->pdev, bar);
	} else {
		struct resource *res;
		res = platform_get_resource(device->platformdev,
					    IORESOURCE_MEM, bar);
		if (!res)
			return 0;
		return resource_size(res);
	}
}

int
nv_device_get_irq(struct nvkm_device *device, bool stall)
{
	if (nv_device_is_pci(device)) {
		return device->pdev->irq;
	} else {
		return platform_get_irq_byname(device->platformdev,
					       stall ? "stall" : "nonstall");
	}
}

static struct nvkm_oclass
nvkm_device_oclass = {
	.ofuncs = &(struct nvkm_ofuncs) {
	},
};

void
nvkm_device_del(struct nvkm_device **pdevice)
{
	struct nvkm_device *device = *pdevice;
	int i;
	if (device) {
		mutex_lock(&nv_devices_mutex);
		for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
			nvkm_object_ref(NULL, &device->subdev[i]);

		nvkm_event_fini(&device->event);

		if (device->pri)
			iounmap(device->pri);
		list_del(&device->head);
		mutex_unlock(&nv_devices_mutex);

		nvkm_engine_destroy(&device->engine);
		*pdevice = NULL;
	}
}
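
/* Creation: an existing device with the same handle yields -EEXIST.
 * Otherwise, when detection is requested, the first 0x102000 bytes of the
 * register aperture are mapped (enough to cover BOOT0 at 0x000000 and the
 * straps at 0x101000), the chipset and card type are derived from BOOT0,
 * and the strap register supplies the crystal frequency.  The device is
 * created with a use-count of 2 so ordinary ref/unref cycles won't destroy
 * it; nvkm_device_del() is the intended teardown path.
 */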
int
nvkm_device_new(void *dev, enum nv_bus_type type, u64 name,
		const char *sname, const char *cfg, const char *dbg,
		bool detect, bool mmio, u64 subdev_mask,
		struct nvkm_device **pdevice)
{
	struct nvkm_device *device;
	u64 mmio_base, mmio_size;
	u32 boot0, strap;
	void __iomem *map;
	int ret = -EEXIST;
	int i;

	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name)
			goto done;
	}

	ret = nvkm_engine_create(NULL, NULL, &nvkm_device_oclass, true,
				 "DEVICE", "device", &device);
	*pdevice = device;
	if (ret)
		goto done;

	switch (type) {
	case NVKM_BUS_PCI:
		device->pdev = dev;
		device->dev = &device->pdev->dev;
		break;
	case NVKM_BUS_PLATFORM:
		device->platformdev = dev;
		device->dev = &device->platformdev->dev;
		break;
	}
	device->handle = name;
	device->cfgopt = cfg;
	device->dbgopt = dbg;
	device->name = sname;

	nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
	list_add_tail(&device->head, &nv_devices);

	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
	if (ret)
		goto done;

	mmio_base = nv_device_resource_start(device, 0);
	mmio_size = nv_device_resource_len(device, 0);

	/* identify the chipset, and determine classes of subdev/engines */
	if (detect) {
		map = ioremap(mmio_base, 0x102000);
		if (ret = -ENOMEM, map == NULL)
			goto done;

		/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
		if (ioread32_native(map + 0x000004) != 0x00000000) {
#else
		if (ioread32_native(map + 0x000004) == 0x00000000) {
#endif
			iowrite32_native(0x01000001, map + 0x000004);
			ioread32_native(map);
		}

		/* read boot0 and strapping information */
		boot0 = ioread32_native(map + 0x000000);
		strap = ioread32_native(map + 0x101000);
		iounmap(map);

		/* determine chipset and derive architecture from it */
		if ((boot0 & 0x1f000000) > 0) {
			device->chipset = (boot0 & 0x1ff00000) >> 20;
			device->chiprev = (boot0 & 0x000000ff);
			switch (device->chipset & 0x1f0) {
			case 0x010: {
				if (0x461 & (1 << (device->chipset & 0xf)))
					device->card_type = NV_10;
				else
					device->card_type = NV_11;
				device->chiprev = 0x00;
				break;
			}
			case 0x020: device->card_type = NV_20; break;
			case 0x030: device->card_type = NV_30; break;
			case 0x040:
			case 0x060: device->card_type = NV_40; break;
			case 0x050:
			case 0x080:
			case 0x090:
			case 0x0a0: device->card_type = NV_50; break;
			case 0x0c0:
			case 0x0d0: device->card_type = NV_C0; break;
			case 0x0e0:
			case 0x0f0:
			case 0x100: device->card_type = NV_E0; break;
			case 0x110:
			case 0x120: device->card_type = GM100; break;
			default:
				break;
			}
		} else
		if ((boot0 & 0xff00fff0) == 0x20004000) {
			if (boot0 & 0x00f00000)
				device->chipset = 0x05;
			else
				device->chipset = 0x04;
			device->card_type = NV_04;
		}

		switch (device->card_type) {
		case NV_04: ret = nv04_identify(device); break;
		case NV_10:
		case NV_11: ret = nv10_identify(device); break;
		case NV_20: ret = nv20_identify(device); break;
		case NV_30: ret = nv30_identify(device); break;
		case NV_40: ret = nv40_identify(device); break;
		case NV_50: ret = nv50_identify(device); break;
		case NV_C0: ret = gf100_identify(device); break;
		case NV_E0: ret = gk104_identify(device); break;
		case GM100: ret = gm100_identify(device); break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret) {
			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
			goto done;
		}

		nvdev_info(device, "NVIDIA %s (%08x)\n", device->cname, boot0);

		/* determine frequency of timing crystal */
		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
		    (device->chipset >= 0x20 && device->chipset < 0x25))
			strap &= 0x00000040;
		else
			strap &= 0x00400040;

		switch (strap) {
		case 0x00000000: device->crystal = 13500; break;
		case 0x00000040: device->crystal = 14318; break;
		case 0x00400000: device->crystal = 27000; break;
		case 0x00400040: device->crystal = 25000; break;
		}
	} else {
		device->cname = "NULL";
		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
	}

	if (mmio) {
		device->pri = ioremap(mmio_base, mmio_size);
		if (!device->pri) {
			nvdev_error(device, "unable to map PRI\n");
			/* must not return directly: nv_devices_mutex is held */
			ret = -ENOMEM;
			goto done;
		}
	}

	/* disable subdevs that aren't required (used by tools) */
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (!(subdev_mask & (1ULL << i)))
			device->oclass[i] = NULL;
	}

	atomic_set(&device->engine.subdev.object.usecount, 2);
	mutex_init(&device->mutex);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
}
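
/* Minimal usage sketch (hypothetical caller; the handle, config and debug
 * values are illustrative, not part of this file's API):
 *
 *	struct nvkm_device *device;
 *	int ret = nvkm_device_new(pdev, NVKM_BUS_PCI, handle,
 *				  dev_name(&pdev->dev), config, debug,
 *				  true, true, ~0ULL, &device);
 *	if (ret)
 *		return ret;
 *	...
 *	nvkm_device_del(&device);
 */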