/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>

/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
                                size_t align)
{
        drm_dma_handle_t *dmah;
        unsigned long addr;
        size_t sz;

        /* dma_alloc_coherent() only guarantees alignment to the smallest
         * PAGE_SIZE order which is greater than or equal to the requested
         * size. Return NULL here for now to make sure nobody tries for
         * larger alignment.
         */
        if (align > size)
                return NULL;

        dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
        if (!dmah)
                return NULL;

        dmah->size = size;
        dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
                                         &dmah->busaddr,
                                         GFP_KERNEL | __GFP_COMP);

        if (dmah->vaddr == NULL) {
                kfree(dmah);
                return NULL;
        }

        memset(dmah->vaddr, 0, size);

        /* XXX - Is virt_to_page() legal for consistent mem? */
        /* Reserve */
        for (addr = (unsigned long)dmah->vaddr, sz = size;
             sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                SetPageReserved(virt_to_page((void *)addr));
        }

        return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
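
/*
 * Usage sketch (illustrative only; not part of this file's API): a caller
 * that needs a zeroed, page-aligned DMA buffer might pair drm_pci_alloc()
 * with drm_pci_free() below. Since drm_pci_alloc() rejects align > size,
 * keep the alignment no larger than the requested size:
 *
 *        drm_dma_handle_t *dmah;
 *
 *        dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *        if (!dmah)
 *                return -ENOMEM;
 *
 *        ... use dmah->vaddr (CPU address) and dmah->busaddr (bus address) ...
 *
 *        drm_pci_free(dev, dmah);
 */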

/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
        unsigned long addr;
        size_t sz;

        if (dmah->vaddr) {
                /* XXX - Is virt_to_page() legal for consistent mem? */
                /* Unreserve */
                for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
                     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                        ClearPageReserved(virt_to_page((void *)addr));
                }
                dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
                                  dmah->busaddr);
        }
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 */
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
        __drm_pci_free(dev, dmah);
        kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);

#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
        /* For historical reasons, drm_get_pci_domain() is busticated
         * on most archs and has to remain so for userspace interface
         * < 1.4, except on alpha which was right from the beginning.
         */
        if (dev->if_version < 0x10004)
                return 0;
#endif /* __alpha__ */

        return pci_domain_nr(dev->pdev->bus);
}

static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
        int len, ret;

        master->unique_len = 40;
        master->unique_size = master->unique_len;
        master->unique = kmalloc(master->unique_size, GFP_KERNEL);
        if (master->unique == NULL)
                return -ENOMEM;

        len = snprintf(master->unique, master->unique_len,
                       "pci:%04x:%02x:%02x.%d",
                       drm_get_pci_domain(dev),
                       dev->pdev->bus->number,
                       PCI_SLOT(dev->pdev->devfn),
                       PCI_FUNC(dev->pdev->devfn));

        if (len >= master->unique_len) {
                DRM_ERROR("buffer overflow");
                ret = -EINVAL;
                goto err;
        } else
                master->unique_len = len;

        return 0;
err:
        return ret;
}

int drm_pci_set_unique(struct drm_device *dev,
                       struct drm_master *master,
                       struct drm_unique *u)
{
        int domain, bus, slot, func, ret;

        master->unique_len = u->unique_len;
        master->unique_size = u->unique_len + 1;
        master->unique = kmalloc(master->unique_size, GFP_KERNEL);
        if (!master->unique) {
                ret = -ENOMEM;
                goto err;
        }

        if (copy_from_user(master->unique, u->unique, master->unique_len)) {
                ret = -EFAULT;
                goto err;
        }

        master->unique[master->unique_len] = '\0';

        /* Return error if the busid submitted doesn't match the device's
         * actual busid.
         */
        ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
        if (ret != 3) {
                ret = -EINVAL;
                goto err;
        }

        domain = bus >> 8;
        bus &= 0xff;

        if ((domain != drm_get_pci_domain(dev)) ||
            (bus != dev->pdev->bus->number) ||
            (slot != PCI_SLOT(dev->pdev->devfn)) ||
            (func != PCI_FUNC(dev->pdev->devfn))) {
                ret = -EINVAL;
                goto err;
        }
        return 0;
err:
        return ret;
}
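
/*
 * Worked example (illustrative): the two busid formats above differ.
 * drm_pci_set_busid() emits the modern form, e.g. "pci:0000:01:00.0"
 * (domain:bus:slot.function), while the legacy string parsed by
 * drm_pci_set_unique() packs the domain into the upper bits of the bus
 * field: "PCI:257:2:0" means domain 1 (257 >> 8), bus 1 (257 & 0xff),
 * slot 2, function 0, i.e. the same device as "pci:0001:01:02.0".
 */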

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
            p->devnum != PCI_SLOT(dev->pdev->devfn) ||
            p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;

        p->irq = dev->pdev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);
        return 0;
}

/**
 * drm_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not
 * equal to that of the device this DRM instance is attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_irq_busid *p = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;

        /* UMS was only ever supported on PCI devices. */
        if (WARN_ON(!dev->pdev))
                return -EINVAL;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;

        return drm_pci_irq_by_busid(dev, p);
}

static void drm_pci_agp_init(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
                if (drm_pci_device_is_agp(dev))
                        dev->agp = drm_agp_init(dev);
                if (dev->agp) {
                        dev->agp->agp_mtrr = arch_phys_wc_add(
                                dev->agp->agp_info.aper_base,
                                dev->agp->agp_info.aper_size *
                                1024 * 1024);
                }
        }
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
        if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
                drm_agp_clear(dev);
                kfree(dev->agp);
                dev->agp = NULL;
        }
}

static struct drm_bus drm_pci_bus = {
        .set_busid = drm_pci_set_busid,
};

/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: entry from the PCI ID table that matches @pdev
 * @driver: DRM device driver
 *
 * Attempt to get inter-module "drm" information. If we are first,
 * register the character device and inter-module information.
 * Try to register; if registration fails, back out the previous work.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                    struct drm_driver *driver)
{
        struct drm_device *dev;
        int ret;

        DRM_DEBUG("\n");

        dev = drm_dev_alloc(driver, &pdev->dev);
        if (!dev)
                return -ENOMEM;

        ret = pci_enable_device(pdev);
        if (ret)
                goto err_free;

        dev->pdev = pdev;
#ifdef __alpha__
        dev->hose = pdev->sysdata;
#endif

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                pci_set_drvdata(pdev, dev);

        drm_pci_agp_init(dev);

        ret = drm_dev_register(dev, ent->driver_data);
        if (ret)
                goto err_agp;

        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, pci_name(pdev), dev->primary->index);

        /* No locking needed since shadow-attach is single-threaded: it may
         * only be called from the per-driver module init hook. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

        return 0;

err_agp:
        drm_pci_agp_destroy(dev);
        pci_disable_device(pdev);
err_free:
        drm_dev_unref(dev);
        return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
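
/*
 * Usage sketch (hypothetical, for illustration): with KMS drivers the PCI
 * core invokes the driver's probe hook, which typically just forwards to
 * drm_get_pci_dev(). The "foo_*" names are placeholders, not part of this
 * file:
 *
 *        static int foo_pci_probe(struct pci_dev *pdev,
 *                                 const struct pci_device_id *ent)
 *        {
 *                return drm_get_pci_dev(pdev, ent, &foo_driver);
 *        }
 */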
347 */ 348 int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) 349 { 350 struct pci_dev *pdev = NULL; 351 const struct pci_device_id *pid; 352 int i; 353 354 DRM_DEBUG("\n"); 355 356 driver->bus = &drm_pci_bus; 357 358 if (driver->driver_features & DRIVER_MODESET) 359 return pci_register_driver(pdriver); 360 361 /* If not using KMS, fall back to stealth mode manual scanning. */ 362 INIT_LIST_HEAD(&driver->legacy_dev_list); 363 for (i = 0; pdriver->id_table[i].vendor != 0; i++) { 364 pid = &pdriver->id_table[i]; 365 366 /* Loop around setting up a DRM device for each PCI device 367 * matching our ID and device class. If we had the internal 368 * function that pci_get_subsys and pci_get_class used, we'd 369 * be able to just pass pid in instead of doing a two-stage 370 * thing. 371 */ 372 pdev = NULL; 373 while ((pdev = 374 pci_get_subsys(pid->vendor, pid->device, pid->subvendor, 375 pid->subdevice, pdev)) != NULL) { 376 if ((pdev->class & pid->class_mask) != pid->class) 377 continue; 378 379 /* stealth mode requires a manual probe */ 380 pci_dev_get(pdev); 381 drm_get_pci_dev(pdev, pid, driver); 382 } 383 } 384 return 0; 385 } 386 387 int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) 388 { 389 struct pci_dev *root; 390 u32 lnkcap, lnkcap2; 391 392 *mask = 0; 393 if (!dev->pdev) 394 return -EINVAL; 395 396 root = dev->pdev->bus->self; 397 398 /* we've been informed via and serverworks don't make the cut */ 399 if (root->vendor == PCI_VENDOR_ID_VIA || 400 root->vendor == PCI_VENDOR_ID_SERVERWORKS) 401 return -EINVAL; 402 403 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); 404 pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2); 405 406 if (lnkcap2) { /* PCIe r3.0-compliant */ 407 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 408 *mask |= DRM_PCIE_SPEED_25; 409 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 410 *mask |= DRM_PCIE_SPEED_50; 411 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 412 *mask |= DRM_PCIE_SPEED_80; 413 } else { /* pre-r3.0 */ 414 if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) 415 *mask |= DRM_PCIE_SPEED_25; 416 if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) 417 *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50); 418 } 419 420 DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2); 421 return 0; 422 } 423 EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask); 424 425 #else 426 427 int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) 428 { 429 return -1; 430 } 431 432 void drm_pci_agp_destroy(struct drm_device *dev) {} 433 434 int drm_irq_by_busid(struct drm_device *dev, void *data, 435 struct drm_file *file_priv) 436 { 437 return -EINVAL; 438 } 439 440 int drm_pci_set_unique(struct drm_device *dev, 441 struct drm_master *master, 442 struct drm_unique *u) 443 { 444 return -EINVAL; 445 } 446 #endif 447 448 EXPORT_SYMBOL(drm_pci_init); 449 450 /** 451 * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem 452 * @driver: DRM device driver 453 * @pdriver: PCI device driver 454 * 455 * Unregisters one or more devices matched by a PCI driver from the DRM 456 * subsystem. 
457 */ 458 void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) 459 { 460 struct drm_device *dev, *tmp; 461 DRM_DEBUG("\n"); 462 463 if (driver->driver_features & DRIVER_MODESET) { 464 pci_unregister_driver(pdriver); 465 } else { 466 list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, 467 legacy_dev_list) { 468 list_del(&dev->legacy_dev_list); 469 drm_put_dev(dev); 470 } 471 } 472 DRM_INFO("Module unloaded\n"); 473 } 474 EXPORT_SYMBOL(drm_pci_exit); 475