/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <drm/drm_crtc_helper.h>
#include <linux/io-mapping.h>

int qxl_log_level;

static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
	struct qxl_mode *m = p;
	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
		      m->y_mili, m->orientation);
}

static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}

static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}

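/*
 * Register a memory slot with the device: record its physical range,
 * program it into the hardware via setup_hw_slot(), and precompute the
 * slot's "high bits" - the slot index and generation packed into the top
 * (slot_id_bits + slot_gen_bits) bits of a 64-bit address, used elsewhere
 * in the driver when building device-visible addresses for this slot.
 */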
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
			  unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;

	setup_hw_slot(qdev, slot_index, slot);

	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}

void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
}

static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}

/*
 * One-time device bring-up: map the ROM, VRAM and surface BARs, validate
 * the ROM, initialize the bo layer, map the ram header and create the
 * command, cursor and release rings, then reset the device, hook up the
 * IRQ, register the main and surface memory slots, and finally start the
 * garbage-collection workqueue and the fbdev emulation.
 */
static int qxl_device_init(struct qxl_device *qdev,
			   struct drm_device *ddev,
			   struct pci_dev *pdev,
			   unsigned long flags)
{
	int r, sb;

	qdev->dev = &pdev->dev;
	qdev->ddev = ddev;
	qdev->pdev = pdev;
	qdev->flags = flags;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	INIT_LIST_HEAD(&qdev->gem.objects);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	qxl_check_device(qdev);

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
		&(qdev->ram_header->cursor_ring_hdr),
		sizeof(struct qxl_command),
		QXL_CURSOR_RING_SIZE,
		qdev->io_base + QXL_IO_NOTIFY_CMD,
		false,
		&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
		&(qdev->ram_header->release_ring_hdr),
		sizeof(uint64_t),
		QXL_RELEASE_RING_SIZE, 0, true,
		NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x]\n",
		 qdev->main_mem_slot,
		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
		 qdev->surfaces_mem_slot,
		 (unsigned long)qdev->surfaceram_base,
		 (unsigned long)qdev->surfaceram_size);

	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	r = qxl_fb_init(qdev);
	if (r)
		return r;

	return 0;
}

/*
 * Undo qxl_device_init(): drop the current release bos, stop the GC
 * workqueue, free the rings, shut down the bo layer and release the
 * io mappings before unmapping the ram header and ROM.
 */
static void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}

int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qdev = dev->dev_private;

	if (qdev == NULL)
		return 0;
	qxl_modeset_fini(qdev);
	qxl_device_fini(qdev);

	kfree(qdev);
	dev->dev_private = NULL;
	return 0;
}

int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct qxl_device *qdev;
	int r;

	/* require kms */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
	if (qdev == NULL)
		return -ENOMEM;

	dev->dev_private = qdev;

	r = qxl_device_init(qdev, dev, dev->pdev, flags);
	if (r)
		goto out;

	r = qxl_modeset_init(qdev);
	if (r) {
		qxl_driver_unload(dev);
		goto out;
	}

	drm_kms_helper_poll_init(qdev->ddev);

	return 0;
out:
	kfree(qdev);
	return r;
}