/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"

/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
	unsigned long addr;
	size_t sz;

	/* pci_alloc_consistent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested size.
	 * Return NULL here for now to make sure nobody tries for larger alignment
	 */
	if (align > size)
		return NULL;

	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page((void *)addr));
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	unsigned long addr;
	size_t sz;

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page((void *)addr));
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_legacy_pci_free(dev, dmah);
	kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);
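/*
 * Illustrative usage sketch (not part of the original file): a legacy driver
 * might pair drm_pci_alloc() and drm_pci_free() roughly as below to manage a
 * page-aligned DMA scratch buffer; the "ring" name is hypothetical.
 *
 *	drm_dma_handle_t *ring;
 *
 *	ring = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ring->vaddr is the CPU mapping, ring->busaddr the bus address
 *	drm_pci_free(dev, ring);
 */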
#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* For historical reasons, drm_get_pci_domain() is busticated
	 * on most archs and has to remain so for userspace interface
	 * < 1.4, except on alpha which was right from the beginning
	 */
	if (dev->if_version < 0x10004)
		return 0;
#endif /* __alpha__ */

	return pci_domain_nr(dev->pdev->bus);
}

int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
				   drm_get_pci_domain(dev),
				   dev->pdev->bus->number,
				   PCI_SLOT(dev->pdev->devfn),
				   PCI_FUNC(dev->pdev->devfn));
	if (!master->unique)
		return -ENOMEM;

	master->unique_len = strlen(master->unique);
	return 0;
}
EXPORT_SYMBOL(drm_pci_set_busid);
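/*
 * For reference (illustrative, not from the original file): for a GPU at
 * domain 0, bus 1, slot 0, function 0, drm_pci_set_busid() above produces
 * the unique string "pci:0000:01:00.0".
 */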
int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return an error if the busid submitted doesn't match the device's
	 * actual busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);
	return 0;
}

/**
 * drm_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return -EINVAL for any busid not
 * equal to that of the device this DRM instance is attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* UMS was only ever supported on PCI devices. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	return drm_pci_irq_by_busid(dev, p);
}

static void drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (dev->agp) {
			dev->agp->agp_mtrr = arch_phys_wc_add(
				dev->agp->agp_info.aper_base,
				dev->agp->agp_info.aper_size *
				1024 * 1024);
		}
	}
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
	if (dev->agp) {
		arch_phys_wc_del(dev->agp->agp_mtrr);
		drm_legacy_agp_clear(dev);
		kfree(dev->agp);
		dev->agp = NULL;
	}
}

/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: entry from the PCI ID table that matches @pdev
 * @driver: DRM device driver
 *
 * Attempt to get inter-module "drm" information. If we are first, then
 * register the character device and inter-module information.
 * Try and register; if we fail to register, back out previous work.
 *
 * NOTE: This function is deprecated, please use drm_dev_alloc() and
 * drm_dev_register() instead and remove your ->load() callback.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = drm_dev_alloc(driver, &pdev->dev);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	dev->pdev = pdev;
#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		pci_set_drvdata(pdev, dev);

	drm_pci_agp_init(dev);

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_agp;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, pci_name(pdev), dev->primary->index);

	/* No locking needed since shadow-attach is single-threaded: it may
	 * only be called from the per-driver module init hook. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

	return 0;

err_agp:
	drm_pci_agp_destroy(dev);
	pci_disable_device(pdev);
err_free:
	drm_dev_unref(dev);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
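/*
 * Illustrative usage sketch (not part of the original file): a modesetting
 * driver that still relies on drm_get_pci_dev() typically calls it from its
 * pci_driver probe callback, roughly as below. The foo_* names are
 * hypothetical.
 *
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		return drm_get_pci_dev(pdev, ent, &foo_drm_driver);
 *	}
 */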
/**
 * drm_pci_init - Register matching PCI devices with the DRM subsystem
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Initializes a drm_device structure, registering the stubs and initializing
 * the AGP device.
 *
 * NOTE: This function is deprecated. Modern modesetting drm drivers should use
 * pci_register_driver() directly, this function only provides shadow-binding
 * support for old legacy drivers on top of that core pci function.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(pdriver);

	/* If not using KMS, fall back to stealth mode manual scanning. */
	INIT_LIST_HEAD(&driver->legacy_dev_list);
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class. If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	struct pci_dev *root;
	u32 lnkcap, lnkcap2;

	*mask = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = dev->pdev->bus->self;

	/* we've been informed that VIA and ServerWorks bridges don't make
	 * the cut */
	if (root->vendor == PCI_VENDOR_ID_VIA ||
	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
	pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {	/* pre-r3.0 */
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);

int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
{
	struct pci_dev *root;
	u32 lnkcap;

	*mlw = 0;
	if (!dev->pdev)
		return -EINVAL;

	root = dev->pdev->bus->self;

	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);

	*mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_max_link_width);
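/*
 * Illustrative usage sketch (not part of the original file): a driver can use
 * drm_pcie_get_speed_cap_mask() to decide whether a gen2/gen3 link-speed
 * transition is worth attempting, roughly as below.
 *
 *	u32 mask;
 *
 *	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0 &&
 *	    (mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) {
 *		// the root port advertises 5.0 GT/s and/or 8.0 GT/s
 *	}
 */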
#else

int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	return -1;
}

void drm_pci_agp_destroy(struct drm_device *dev) {}

int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -EINVAL;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	return -EINVAL;
}
#endif

EXPORT_SYMBOL(drm_pci_init);

/**
 * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Unregisters one or more devices matched by a PCI driver from the DRM
 * subsystem.
 *
 * NOTE: This function is deprecated. Modern modesetting drm drivers should use
 * pci_unregister_driver() directly, this function only provides shadow-binding
 * support for old legacy drivers on top of that core pci function.
 */
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct drm_device *dev, *tmp;

	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET) {
		pci_unregister_driver(pdriver);
	} else {
		list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
					 legacy_dev_list) {
			list_del(&dev->legacy_dev_list);
			drm_put_dev(dev);
		}
	}
	DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
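/*
 * Illustrative usage sketch (not part of the original file): a legacy (UMS)
 * driver typically wires drm_pci_init()/drm_pci_exit() into its module
 * init/exit hooks, roughly as below. The foo_* names are hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */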