/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

int qxl_log_level;

static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
	struct qxl_mode *m = p;
	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
		      m->y_mili, m->orientation);
}

static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}

static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
			  unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;
	struct qxl_ram_header *ram_header = qdev->ram_header;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;
	ram_header->mem_slot.mem_start = slot->start_phys_addr;
	ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
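	/*
	 * Every device address built from this slot carries the slot id and
	 * generation in its top (slot_id_bits + slot_gen_bits) bits; shift
	 * the pair into place and cache it for later address translation.
	 */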
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}

static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}

int qxl_device_init(struct qxl_device *qdev,
		    struct drm_device *ddev,
		    struct pci_dev *pdev,
		    unsigned long flags)
{
	int r;

	qdev->dev = &pdev->dev;
	qdev->ddev = ddev;
	qdev->pdev = pdev;
	qdev->flags = flags;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	INIT_LIST_HEAD(&qdev->gem.objects);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->surfaceram_base = pci_resource_start(pdev, 1);
	qdev->surfaceram_size = pci_resource_len(pdev, 1);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
	DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
		      (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (void *)qdev->surfaceram_base,
		      (void *)pci_resource_end(pdev, 1),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024);

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	if (!qxl_check_device(qdev)) {
		/* ROM did not validate - do not bring up the device */
		iounmap(qdev->rom);
		qdev->rom = NULL;
		return -ENODEV;
	}

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
				false,
				&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x)\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);

	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	r = qxl_fb_init(qdev);
	if (r)
		return r;

	return 0;
}

static void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}

int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qdev = dev->dev_private;

	if (qdev == NULL)
		return 0;
	qxl_modeset_fini(qdev);
	qxl_device_fini(qdev);

	kfree(qdev);
	dev->dev_private = NULL;
	return 0;
}

int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct qxl_device *qdev;
	int r;

	/* require kms */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
	if (qdev == NULL)
		return -ENOMEM;

	dev->dev_private = qdev;

	r = qxl_device_init(qdev, dev, dev->pdev, flags);
	if (r)
		goto out;

	r = qxl_modeset_init(qdev);
	if (r) {
		/* qxl_driver_unload() already frees qdev */
		qxl_driver_unload(dev);
		return r;
	}

	return 0;
out:
	kfree(qdev);
	return r;
}