/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctls for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "drmP.h"

/**********************************************************************/
/** \name PCI memory */
/*@{*/

/**
 * \brief Allocate a PCI consistent memory block for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
	unsigned long addr;
	size_t sz;

	/* dma_alloc_coherent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested
	 * size.  Return NULL here for now to make sure nobody tries for
	 * larger alignment.
	 */
	if (align > size)
		return NULL;

	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
					 GFP_KERNEL | __GFP_COMP);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page(addr));
	}

	return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);

/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	unsigned long addr;
	size_t sz;

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page(addr));
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * \brief Free a PCI consistent memory block.
 */
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	__drm_pci_free(dev, dmah);
	kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);
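
/*
 * Illustrative sketch (not part of this file's original code): a driver
 * might use the helpers above to allocate a page-sized, page-aligned
 * consistent buffer, hand its bus address to the hardware, and release it
 * again on teardown.  The function name example_alloc_ring and the
 * "ring base" usage are hypothetical.
 */
static int example_alloc_ring(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	/* align must not exceed size, per the check in drm_pci_alloc() */
	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (!dmah)
		return -ENOMEM;

	/* dmah->vaddr is the kernel mapping, dmah->busaddr the bus address */
	/* ... program dmah->busaddr as the (hypothetical) ring base ... */

	/* drm_pci_free() releases both the memory block and the handle */
	drm_pci_free(dev, dmah);
	return 0;
}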

#ifdef CONFIG_PCI
/**
 * Register a PCI device with the DRM core.
 *
 * \param pdev PCI device structure
 * \param ent entry from the PCI ID table with device type flags
 * \param driver DRM driver to bind to the device
 * \return zero on success or a negative number on failure.
 *
 * Attempts to get inter-module "drm" information.  If we are first,
 * register the character device and inter-module information.
 * Try to register; if registration fails, back out the previous work.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_g1;

	pci_set_master(pdev);

	dev->pdev = pdev;
	dev->dev = &pdev->dev;

	dev->pci_device = pdev->device;
	dev->pci_vendor = pdev->vendor;

#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif

	mutex_lock(&drm_global_mutex);

	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
		goto err_g2;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		pci_set_drvdata(pdev, dev);
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g2;
	}

	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g3;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, ent->driver_data);
		if (ret)
			goto err_g4;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
						       &dev->primary->mode_group);
		if (ret)
			goto err_g4;
	}

	list_add_tail(&dev->driver_item, &driver->device_list);

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, pci_name(pdev), dev->primary->index);

	mutex_unlock(&drm_global_mutex);
	return 0;

err_g4:
	drm_put_minor(&dev->primary);
err_g3:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g2:
	pci_disable_device(pdev);
err_g1:
	kfree(dev);
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
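
/*
 * Illustrative sketch (not part of this file's original code): a
 * hypothetical KMS driver's PCI probe callback typically just forwards to
 * drm_get_pci_dev().  The identifiers example_kms_driver and
 * example_kms_probe are made up; a real driver points the .probe member of
 * the pci_driver embedded in its drm_driver at such a function, so that
 * pci_register_driver() (see drm_pci_init() below) dispatches here.
 */
static struct drm_driver example_kms_driver = {
	.driver_features = DRIVER_MODESET,
	/* ... fops, load/unload hooks, feature flags, version info ... */
};

static int example_kms_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	/* enables the device, registers the minors and runs the load hook */
	return drm_get_pci_dev(pdev, ent, &example_kms_driver);
}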

/**
 * PCI device initialization.  Called via drm_init() at module load time.
 *
 * \param driver DRM driver structure to register.
 * \return zero on success or a negative number on failure.
 *
 * Initializes a drm_device structure, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_pci_init(struct drm_driver *driver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(&driver->pci_driver);

	/* If not using KMS, fall back to stealth mode manual scanning. */
	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
		pid = &driver->pci_driver.id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class.  If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}

#else

int drm_pci_init(struct drm_driver *driver)
{
	return -1;
}

#endif

/*@}*/
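
/*
 * Illustrative sketch (not part of this file's original code): a
 * hypothetical non-KMS driver leaves DRIVER_MODESET unset, embeds its PCI
 * ID table in the drm_driver's pci_driver, and calls drm_pci_init() from
 * its module init so the stealth-mode scan above picks up matching
 * devices.  All identifiers and PCI IDs below are fictitious, and module
 * exit/teardown is omitted for brevity.
 */
static struct pci_device_id example_legacy_pciidlist[] = {
	{ 0x1234, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, 0, 0 }	/* .vendor == 0 terminates the scan in drm_pci_init() */
};

static struct drm_driver example_legacy_driver = {
	.name = "example_legacy",
	/* ... fops, feature flags, load/unload hooks, version info ... */
	.pci_driver = {
		.name = "example_legacy",
		.id_table = example_legacy_pciidlist,
	},
};

static int __init example_legacy_init(void)
{
	return drm_pci_init(&example_legacy_driver);
}
module_init(example_legacy_init);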