/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

int qxl_log_level;

static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
	struct qxl_mode *m = p;
	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
		      m->y_mili, m->orientation);
}

static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}

static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
			  unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;
	struct qxl_ram_header *ram_header = qdev->ram_header;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;
	ram_header->mem_slot.mem_start = slot->start_phys_addr;
	ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
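	/*
	 * Pack the slot index and generation into the topmost
	 * (slot_id_bits + slot_gen_bits) bits of a 64-bit value; the
	 * result is kept in slot->high_bits and later combined with
	 * per-object offsets when building device-visible addresses.
	 */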
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}

static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}

int qxl_device_init(struct qxl_device *qdev,
		    struct drm_device *ddev,
		    struct pci_dev *pdev,
		    unsigned long flags)
{
	int r;

	qdev->dev = &pdev->dev;
	qdev->ddev = ddev;
	qdev->pdev = pdev;
	qdev->flags = flags;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	INIT_LIST_HEAD(&qdev->gem.objects);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->surfaceram_base = pci_resource_start(pdev, 1);
	qdev->surfaceram_size = pci_resource_len(pdev, 1);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, 1),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024);

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	qxl_check_device(qdev);

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CMD,
				false,
				&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x)\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);

	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	r = qxl_fb_init(qdev);
	if (r)
		return r;

	return 0;
}

static void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}

int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qdev = dev->dev_private;

	if (qdev == NULL)
		return 0;
	qxl_modeset_fini(qdev);
	qxl_device_fini(qdev);

	kfree(qdev);
	dev->dev_private = NULL;
	return 0;
}

int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct qxl_device *qdev;
	int r;

	/* require kms */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
	if (qdev == NULL)
		return -ENOMEM;

	dev->dev_private = qdev;

	r = qxl_device_init(qdev, dev, dev->pdev, flags);
	if (r)
		goto out;

	r = qxl_modeset_init(qdev);
	if (r) {
		qxl_driver_unload(dev);
		goto out;
	}

	return 0;
out:
	kfree(qdev);
	return r;
}