/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/notify.h>
#include <core/object.h>
#include <core/device.h>
#include <core/client.h>
#include <core/option.h>
#include <nvif/unpack.h>
#include <nvif/class.h>

#include <subdev/bios.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>

#include "priv.h"
#include "acpi.h"

static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);

struct nouveau_device *
nouveau_device_find(u64 name)
{
	struct nouveau_device *device, *match = NULL;
	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name) {
			match = device;
			break;
		}
	}
	mutex_unlock(&nv_devices_mutex);
	return match;
}

int
nouveau_device_list(u64 *name, int size)
{
	struct nouveau_device *device;
	int nr = 0;
	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (nr++ < size)
			name[nr - 1] = device->handle;
	}
	mutex_unlock(&nv_devices_mutex);
	return nr;
}

/******************************************************************************
 * nouveau_devobj (0x0080): class implementation
 *****************************************************************************/

struct nouveau_devobj {
	struct nouveau_parent base;
	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
};

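/* NV_DEVICE_V0_INFO method: fills a client-provided nv_device_info_v0 with
 * the bus type, chipset id/revision, card family and VRAM sizes.
 */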
static int
nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct nouveau_instmem *imem = nouveau_instmem(device);
	union {
		struct nv_device_info_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "device info size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "device info vers %d\n", args->v0.version);
	} else
		return ret;

	switch (device->chipset) {
	case 0x01a:
	case 0x01f:
	case 0x04c:
	case 0x04e:
	case 0x063:
	case 0x067:
	case 0x068:
	case 0x0aa:
	case 0x0ac:
	case 0x0af:
		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
		break;
	default:
		if (device->pdev) {
			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
			else
			if (pci_is_pcie(device->pdev))
				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
			else
				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
		} else {
			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
		}
		break;
	}

	switch (device->card_type) {
	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
	case NV_10:
	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
	default:
		args->v0.family = 0;
		break;
	}

	args->v0.chipset  = device->chipset;
	args->v0.revision = device->chiprev;
	if (pfb)  args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
	else      args->v0.ram_size = args->v0.ram_user = 0;
	if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
	return 0;
}

static int
nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
		    void *data, u32 size)
{
	switch (mthd) {
	case NV_DEVICE_V0_INFO:
		return nouveau_devobj_info(object, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static u8
nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
{
	return nv_rd08(object->engine, addr);
}

static u16
nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
{
	return nv_rd16(object->engine, addr);
}

static u32
nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
{
	return nv_rd32(object->engine, addr);
}

static void
nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
{
	nv_wr08(object->engine, addr, data);
}

static void
nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
{
	nv_wr16(object->engine, addr, data);
}

static void
nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
	nv_wr32(object->engine, addr, data);
}

static int
nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
{
	struct nouveau_device *device = nv_device(object);
	*addr = nv_device_resource_start(device, 0);
	*size = nv_device_resource_len(device, 0);
	return 0;
}

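/* translation table from NV_DEVICE_V0_DISABLE_* api flags to the internal
 * subdev/engine index each flag controls.
 */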
static const u64 disable_map[] = {
	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_CLK ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_MMU]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_PMU]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_SUBDEV_FUSE]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_PM ]	= NV_DEVICE_V0_DISABLE_CORE,
	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GR,
	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
	[NVDEV_ENGINE_CIPHER]	= NV_DEVICE_V0_DISABLE_CIPHER,
	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
	[NVDEV_ENGINE_MSPPP]	= NV_DEVICE_V0_DISABLE_MSPPP,
	[NVDEV_ENGINE_CE0]	= NV_DEVICE_V0_DISABLE_CE0,
	[NVDEV_ENGINE_CE1]	= NV_DEVICE_V0_DISABLE_CE1,
	[NVDEV_ENGINE_CE2]	= NV_DEVICE_V0_DISABLE_CE2,
	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
	[NVDEV_ENGINE_MSENC]	= NV_DEVICE_V0_DISABLE_MSENC,
	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
	[NVDEV_ENGINE_MSVLD]	= NV_DEVICE_V0_DISABLE_MSVLD,
	[NVDEV_ENGINE_SEC]	= NV_DEVICE_V0_DISABLE_SEC,
	[NVDEV_SUBDEV_NR]	= 0,
};

static void
nouveau_devobj_dtor(struct nouveau_object *object)
{
	struct nouveau_devobj *devobj = (void *)object;
	int i;

	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
		nouveau_object_ref(NULL, &devobj->subdev[i]);

	nouveau_parent_destroy(&devobj->base);
}

static struct nouveau_oclass
nouveau_devobj_oclass_super = {
	.handle = NV_DEVICE,
	.ofuncs = &(struct nouveau_ofuncs) {
		.dtor = nouveau_devobj_dtor,
		.init = _nouveau_parent_init,
		.fini = _nouveau_parent_fini,
		.mthd = nouveau_devobj_mthd,
		.map  = nouveau_devobj_map,
		.rd08 = nouveau_devobj_rd08,
		.rd16 = nouveau_devobj_rd16,
		.rd32 = nouveau_devobj_rd32,
		.wr08 = nouveau_devobj_wr08,
		.wr16 = nouveau_devobj_wr16,
		.wr32 = nouveau_devobj_wr32,
	}
};

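/* creates the per-client device object: locates the requested device,
 * identifies the chipset (if not done already), maps its registers and
 * instantiates the subdevs/engines the client hasn't disabled.
 */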
static int
nouveau_devobj_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	union {
		struct nv_device_v0 v0;
	} *args = data;
	struct nouveau_client *client = nv_client(parent);
	struct nouveau_device *device;
	struct nouveau_devobj *devobj;
	u32 boot0, strap;
	u64 disable, mmio_base, mmio_size;
	void __iomem *map;
	int ret, i, c;

	nv_ioctl(parent, "create device size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create device v%d device %016llx "
				 "disable %016llx debug0 %016llx\n",
			 args->v0.version, args->v0.device,
			 args->v0.disable, args->v0.debug0);
	} else
		return ret;

	/* give privileged clients register access */
	if (client->super)
		oclass = &nouveau_devobj_oclass_super;

	/* find the device subdev that matches what the client requested */
	device = nv_device(client->device);
	if (args->v0.device != ~0) {
		device = nouveau_device_find(args->v0.device);
		if (!device)
			return -ENODEV;
	}

	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
				    nouveau_control_oclass,
				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
				    (1ULL << NVDEV_ENGINE_FIFO) |
				    (1ULL << NVDEV_ENGINE_DISP) |
				    (1ULL << NVDEV_ENGINE_PM), &devobj);
	*pobject = nv_object(devobj);
	if (ret)
		return ret;

	mmio_base = nv_device_resource_start(device, 0);
	mmio_size = nv_device_resource_len(device, 0);

	/* translate api disable mask into internal mapping */
	disable = args->v0.debug0;
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (args->v0.disable & disable_map[i])
			disable |= (1ULL << i);
	}

	/* identify the chipset, and determine classes of subdev/engines */
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
	    !device->card_type) {
		map = ioremap(mmio_base, 0x102000);
		if (map == NULL)
			return -ENOMEM;

		/* switch mmio to cpu's native endianness */
#ifndef __BIG_ENDIAN
		if (ioread32_native(map + 0x000004) != 0x00000000)
#else
		if (ioread32_native(map + 0x000004) == 0x00000000)
#endif
			iowrite32_native(0x01000001, map + 0x000004);

		/* read boot0 and strapping information */
		boot0 = ioread32_native(map + 0x000000);
		strap = ioread32_native(map + 0x101000);
		iounmap(map);

		/* determine chipset and derive architecture from it */
		if ((boot0 & 0x1f000000) > 0) {
			device->chipset = (boot0 & 0x1ff00000) >> 20;
			device->chiprev = (boot0 & 0x000000ff);
			switch (device->chipset & 0x1f0) {
			case 0x010: {
				if (0x461 & (1 << (device->chipset & 0xf)))
					device->card_type = NV_10;
				else
					device->card_type = NV_11;
				device->chiprev = 0x00;
				break;
			}
			case 0x020: device->card_type = NV_20; break;
			case 0x030: device->card_type = NV_30; break;
			case 0x040:
			case 0x060: device->card_type = NV_40; break;
			case 0x050:
			case 0x080:
			case 0x090:
			case 0x0a0: device->card_type = NV_50; break;
			case 0x0c0:
			case 0x0d0: device->card_type = NV_C0; break;
			case 0x0e0:
			case 0x0f0:
			case 0x100: device->card_type = NV_E0; break;
			case 0x110:
			case 0x120: device->card_type = GM100; break;
			default:
				break;
			}
		} else
		if ((boot0 & 0xff00fff0) == 0x20004000) {
			if (boot0 & 0x00f00000)
				device->chipset = 0x05;
			else
				device->chipset = 0x04;
			device->card_type = NV_04;
		}

		switch (device->card_type) {
		case NV_04: ret = nv04_identify(device); break;
		case NV_10:
		case NV_11: ret = nv10_identify(device); break;
		case NV_20: ret = nv20_identify(device); break;
		case NV_30: ret = nv30_identify(device); break;
		case NV_40: ret = nv40_identify(device); break;
		case NV_50: ret = nv50_identify(device); break;
		case NV_C0: ret = nvc0_identify(device); break;
		case NV_E0: ret = nve0_identify(device); break;
		case GM100: ret = gm100_identify(device); break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret) {
			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
			return ret;
		}

		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
		nv_info(device, "Chipset: %s (NV%02X)\n",
			device->cname, device->chipset);
		nv_info(device, "Family : NV%02X\n", device->card_type);

		/* determine frequency of timing crystal */
		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
		    (device->chipset >= 0x20 && device->chipset < 0x25))
			strap &= 0x00000040;
		else
			strap &= 0x00400040;

		switch (strap) {
		case 0x00000000: device->crystal = 13500; break;
		case 0x00000040: device->crystal = 14318; break;
		case 0x00400000: device->crystal = 27000; break;
		case 0x00400040: device->crystal = 25000; break;
		}

		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
	} else
	if ((args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
		device->cname = "NULL";
		device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
	}

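	/* map the register aperture, unless the client disabled mmio access
	 * or it has been mapped already
	 */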
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
	    !nv_subdev(device)->mmio) {
		nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
		if (!nv_subdev(device)->mmio) {
			nv_error(device, "unable to map device registers\n");
			return -ENOMEM;
		}
	}

	/* ensure requested subsystems are available for use */
	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
			continue;

		if (device->subdev[i]) {
			nouveau_object_ref(device->subdev[i],
					   &devobj->subdev[i]);
			continue;
		}

		ret = nouveau_object_ctor(nv_object(device), NULL,
					  oclass, NULL, i,
					  &devobj->subdev[i]);
		if (ret == -ENODEV)
			continue;
		if (ret)
			return ret;

		device->subdev[i] = devobj->subdev[i];

		/* note: can't init *any* subdevs until devinit has been run
		 * due to not knowing exactly what the vbios init tables will
		 * mess with.  devinit also can't be run until all of its
		 * dependencies have been created.
		 *
		 * this code delays init of any subdev until all of devinit's
		 * dependencies have been created, and then initialises each
		 * subdev in turn as they're created.
		 */
		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
			struct nouveau_object *subdev = devobj->subdev[c++];
			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret)
					return ret;
				atomic_dec(&nv_object(device)->usecount);
			} else
			if (subdev) {
				nouveau_subdev_reset(subdev);
			}
		}
	}

	return 0;
}

static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
	.ctor = nouveau_devobj_ctor,
	.dtor = nouveau_devobj_dtor,
	.init = _nouveau_parent_init,
	.fini = _nouveau_parent_fini,
	.mthd = nouveau_devobj_mthd,
};

/******************************************************************************
 * nouveau_device: engine functions
 *****************************************************************************/

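/* resolve the nouveau_device that ultimately owns an arbitrary object by
 * walking up the object/engine hierarchy.
 */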
struct nouveau_device *
nv_device(void *obj)
{
	struct nouveau_object *device = nv_object(obj);
	if (device->engine == NULL) {
		while (device && device->parent)
			device = device->parent;
	} else {
		device = &nv_object(obj)->engine->subdev.object;
		if (device && device->parent)
			device = device->parent;
	}
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
	if (unlikely(!device))
		nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
#endif
	return (void *)device;
}

static struct nouveau_oclass
nouveau_device_sclass[] = {
	{ 0x0080, &nouveau_devobj_ofuncs },
	{}
};

static int
nouveau_device_event_ctor(struct nouveau_object *object, void *data, u32 size,
			  struct nvkm_notify *notify)
{
	if (!WARN_ON(size != 0)) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -EINVAL;
}

static const struct nvkm_event_func
nouveau_device_event_func = {
	.ctor = nouveau_device_event_ctor,
};

static int
nouveau_device_fini(struct nouveau_object *object, bool suspend)
{
	struct nouveau_device *device = (void *)object;
	struct nouveau_object *subdev;
	int ret, i;

	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_dec(subdev, suspend);
				if (ret && suspend)
					goto fail;
			}
		}
	}

	ret = nvkm_acpi_fini(device, suspend);
fail:
	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret) {
					/* XXX */
				}
			}
		}
	}

	return ret;
}

static int
nouveau_device_init(struct nouveau_object *object)
{
	struct nouveau_device *device = (void *)object;
	struct nouveau_object *subdev;
	int ret, i = 0;

	ret = nvkm_acpi_init(device);
	if (ret)
		goto fail;

	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nouveau_object_inc(subdev);
				if (ret)
					goto fail;
			} else {
				nouveau_subdev_reset(subdev);
			}
		}
	}

	ret = 0;
fail:
	for (--i; ret && i >= 0; i--) {
		if ((subdev = device->subdev[i])) {
			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
				nouveau_object_dec(subdev, false);
		}
	}

	if (ret)
		nvkm_acpi_fini(device, false);
	return ret;
}

static void
nouveau_device_dtor(struct nouveau_object *object)
{
	struct nouveau_device *device = (void *)object;

	nvkm_event_fini(&device->event);

	mutex_lock(&nv_devices_mutex);
	list_del(&device->head);
	mutex_unlock(&nv_devices_mutex);

	if (nv_subdev(device)->mmio)
		iounmap(nv_subdev(device)->mmio);

	nouveau_engine_destroy(&device->engine);
}

resource_size_t
nv_device_resource_start(struct nouveau_device *device, unsigned int bar)
{
	if (nv_device_is_pci(device)) {
		return pci_resource_start(device->pdev, bar);
	} else {
		struct resource *res;
		res = platform_get_resource(device->platformdev,
					    IORESOURCE_MEM, bar);
		if (!res)
			return 0;
		return res->start;
	}
}

resource_size_t
nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
{
	if (nv_device_is_pci(device)) {
		return pci_resource_len(device->pdev, bar);
	} else {
		struct resource *res;
		res = platform_get_resource(device->platformdev,
					    IORESOURCE_MEM, bar);
		if (!res)
			return 0;
		return resource_size(res);
	}
}

int
nv_device_get_irq(struct nouveau_device *device, bool stall)
{
	if (nv_device_is_pci(device)) {
		return device->pdev->irq;
	} else {
		return platform_get_irq_byname(device->platformdev,
					       stall ? "stall" : "nonstall");
	}
}

static struct nouveau_oclass
nouveau_device_oclass = {
	.handle = NV_ENGINE(DEVICE, 0x00),
	.ofuncs = &(struct nouveau_ofuncs) {
		.dtor = nouveau_device_dtor,
		.init = nouveau_device_init,
		.fini = nouveau_device_fini,
	},
};

int
nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
		       const char *sname, const char *cfg, const char *dbg,
		       int length, void **pobject)
{
	struct nouveau_device *device;
	int ret = -EEXIST;

	mutex_lock(&nv_devices_mutex);
	list_for_each_entry(device, &nv_devices, head) {
		if (device->handle == name)
			goto done;
	}

	ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
				     "DEVICE", "device", length, pobject);
	device = *pobject;
	if (ret)
		goto done;

	switch (type) {
	case NOUVEAU_BUS_PCI:
		device->pdev = dev;
		break;
	case NOUVEAU_BUS_PLATFORM:
		device->platformdev = dev;
		break;
	}
	device->handle = name;
	device->cfgopt = cfg;
	device->dbgopt = dbg;
	device->name = sname;

	nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
	nv_engine(device)->sclass = nouveau_device_sclass;
	list_add(&device->head, &nv_devices);

	ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
			      &device->event);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
}