xref: /openbmc/linux/drivers/gpu/drm/qxl/qxl_kms.c (revision 51ad5b54)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/io-mapping.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "qxl_drv.h"
#include "qxl_object.h"

int qxl_log_level;

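/*
 * Sanity-check the ROM exposed by the device: verify the QXL magic, log
 * the reported version and memory layout, and record the size of the
 * surface0 (primary) area as the usable VRAM size.
 */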
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
	return true;
}

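/*
 * Program one memory slot into the device: write the slot's physical
 * range into the RAM header and register it with the hardware through
 * qxl_io_memslot_add().
 */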
static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
}

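/*
 * Fill in the driver-side bookkeeping for a memory slot, register it with
 * the hardware, and precompute the slot's high address bits (slot id and
 * generation) that are folded into every device physical address that
 * points into this slot.
 */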
static void setup_slot(struct qxl_device *qdev,
		       struct qxl_memslot *slot,
		       unsigned int slot_index,
		       const char *slot_name,
		       unsigned long start_phys_addr,
		       unsigned long size)
{
	uint64_t high_bits;

	slot->index = slot_index;
	slot->name = slot_name;
	slot->start_phys_addr = start_phys_addr;
	slot->size = size;

	setup_hw_slot(qdev, slot);

	slot->generation = qdev->rom->slot_generation;
	high_bits = (qdev->rom->slots_start + slot->index)
		<< qdev->rom->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
	slot->high_bits = high_bits;

	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx, gpu_offset 0x%lx\n",
		 slot->index, slot->name,
		 (unsigned long)slot->start_phys_addr,
		 (unsigned long)slot->size,
		 (unsigned long)slot->gpu_offset);
}

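/* Re-register the main and surfaces memslots, e.g. after a device reset. */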
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, &qdev->main_slot);
	setup_hw_slot(qdev, &qdev->surfaces_slot);
}

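/* Deferred work item that runs qxl_garbage_collect() in process context. */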
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);

	qxl_garbage_collect(qdev);
}

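/*
 * One-time device initialization: map the PCI BARs, validate the ROM,
 * set up the memory manager, the command/cursor/release rings, IRQ
 * handling, and the two memory slots (main VRAM and surfaces).
 */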
int qxl_device_init(struct qxl_device *qdev,
		    struct pci_dev *pdev)
{
	int r, sb;

	qdev->ddev.pdev = pdev;
	pci_set_drvdata(pdev, &qdev->ddev);

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

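	/*
	 * PCI BAR layout: BAR 0 is VRAM (drawables and the RAM header),
	 * BAR 1 is the 32bit surface area, BAR 2 is the ROM, BAR 3 holds
	 * the I/O ports, and an optional BAR 4 provides a 64bit surface
	 * area.
	 */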
	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	if (!qdev->vram_mapping) {
		pr_err("Unable to create vram_mapping\n");
		return -ENOMEM;
	}

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
		if (!qdev->surface_mapping) {
			pr_err("Unable to create surface_mapping\n");
			r = -ENOMEM;
			goto vram_mapping_free;
		}
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		 (unsigned long long)qdev->vram_base,
		 (unsigned long long)pci_resource_end(pdev, 0),
		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		 (int)pci_resource_len(pdev, 0) / 1024,
		 (unsigned long long)qdev->surfaceram_base,
		 (unsigned long long)pci_resource_end(pdev, sb),
		 (int)qdev->surfaceram_size / 1024 / 1024,
		 (int)qdev->surfaceram_size / 1024,
		 (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		r = -ENOMEM;
		goto surface_mapping_free;
	}

	if (!qxl_check_device(qdev)) {
		r = -ENODEV;
		goto rom_unmap;
	}

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		goto rom_unmap;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
	if (!qdev->ram_header) {
		DRM_ERROR("Unable to ioremap RAM header\n");
		r = -ENOMEM;
		goto bo_fini;
	}

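	/*
	 * Create the three rings that live in the RAM header: the command
	 * and cursor rings carry commands to the device, the release ring
	 * carries back the ids of resources the device has finished with.
	 */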
	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);
	if (!qdev->command_ring) {
		DRM_ERROR("Unable to create command ring\n");
		r = -ENOMEM;
		goto ram_header_unmap;
	}

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
				false,
				&qdev->cursor_event);

	if (!qdev->cursor_ring) {
		DRM_ERROR("Unable to create cursor ring\n");
		r = -ENOMEM;
		goto command_ring_free;
	}

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);

	if (!qdev->release_ring) {
		DRM_ERROR("Unable to create release ring\n");
		r = -ENOMEM;
		goto cursor_ring_free;
	}

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r) {
		DRM_ERROR("Unable to init qxl irq\n");
		goto release_ring_free;
	}

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	setup_slot(qdev, &qdev->main_slot, 0, "main",
		   (unsigned long)qdev->vram_base,
		   (unsigned long)qdev->rom->ram_header_offset);
	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
		   (unsigned long)qdev->surfaceram_base,
		   (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;

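/* Error unwind: drop everything acquired above, in reverse order. */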
release_ring_free:
	qxl_ring_free(qdev->release_ring);
cursor_ring_free:
	qxl_ring_free(qdev->cursor_ring);
command_ring_free:
	qxl_ring_free(qdev->command_ring);
ram_header_unmap:
	iounmap(qdev->ram_header);
bo_fini:
	qxl_bo_fini(qdev);
rom_unmap:
	iounmap(qdev->rom);
surface_mapping_free:
	io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
	io_mapping_free(qdev->vram_mapping);
	return r;
}

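/*
 * Tear down everything qxl_device_init() set up: pending release BOs,
 * GEM and TTM state, the deferred GC work, the rings and the mappings.
 */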
void qxl_device_fini(struct qxl_device *qdev)
{
	qxl_bo_unref(&qdev->current_release_bo[0]);
	qxl_bo_unref(&qdev->current_release_bo[1]);
	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	flush_work(&qdev->gc_work);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
}