xref: /openbmc/linux/drivers/gpu/drm/qxl/qxl_release.c (revision f0931824)
/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * Drawable cmd cache - allocate a bunch of VRAM pages and suballocate
 * them into 256-byte chunks for now - that gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - a drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)

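/*
 * Suballocation sizes, indexed by release type: 0 = drawable,
 * 1 = surface cmd, 2 = cursor cmd (see qxl_alloc_release_reserved()).
 */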
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

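/*
 * Wait for a release fence to signal. Nudge the device with an OOM notify,
 * poll the garbage collector, and back off with short sleeps until the
 * fence signals or the timeout expires. Returns the remaining timeout in
 * jiffies, or 0 on timeout.
 */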
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence,
				       "failed to wait on release %llu after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * The original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases was not set, so we do the same here.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

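/*
 * Allocate a qxl_release of the given type and an idr handle for it. The
 * handle becomes release->id and is later written into the release info so
 * the device can hand it back for garbage collection. Returns the handle
 * on success, negative errno on failure.
 */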
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

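/*
 * Drop every buffer object tracked by the release: unreference each BO,
 * unlink its list entry, and free it.
 */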
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

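/*
 * Free a release: give back its surface id (if any), remove it from the
 * idr, drop its BO list, and then either signal and put the fence (when it
 * was initialized) or free the struct directly.
 */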
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
	atomic_dec(&qdev->release_count);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release BOs - they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

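/*
 * Add a buffer object to the release's reservation list. Takes a reference
 * on the BO; adding a BO that is already on the list is a no-op.
 */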
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

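/*
 * Make a BO ready for command submission: validate its placement unless it
 * is pinned, reserve a fence slot on its reservation object, and make sure
 * it has a surface id if it needs one.
 */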
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->tbo.pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

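/*
 * Reserve and validate all BOs on the release's list. On failure, any
 * reservations already taken are backed off before returning.
 */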
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/*
	 * If only one object is on the release, it is the release itself;
	 * since these objects are pinned, no reservation is needed.
	 */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/*
	 * If only one object is on the release, it is the release itself;
	 * since these objects are pinned, no reservation was taken.
	 */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

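/*
 * For a QXL_SURFACE_CMD_DESTROY with an existing create release, the
 * destroy command is stashed in the same BO as the create command, 64
 * bytes after it; otherwise a fresh surface-cmd release is allocated.
 */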
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

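/*
 * Allocate a release of the given type and suballocate command space for
 * it from the current release BO of that type. When the current BO fills
 * up, a new pinned BO is allocated and the old one is unpinned and
 * unreferenced outside the release mutex.
 */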
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo, *free_bo = NULL;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}
	atomic_inc(&qdev->release_count);

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		free_bo = qdev->current_release_bo[cur_idx];
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			if (free_bo) {
				qxl_bo_unpin(free_bo);
				qxl_bo_unref(&free_bo);
			}
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);
	if (free_bo) {
		qxl_bo_unpin(free_bo);
		qxl_bo_unref(&free_bo);
	}

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

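/*
 * Look up a release by its idr handle; returns NULL (and logs an error)
 * if the id is not found.
 */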
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

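/*
 * Map the page of the release BO holding this release's info and return a
 * pointer to the info inside it. Must be paired with qxl_release_unmap(),
 * as in qxl_alloc_release_reserved():
 *
 *	info = qxl_release_map(qdev, release);
 *	info->id = idr_ret;
 *	qxl_release_unmap(qdev, release, info);
 */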
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

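/*
 * Initialize the release's dma_fence, add it to each reserved BO's
 * reservation object, move the BOs to the LRU tail, and unlock them,
 * finishing the reservation ticket taken in qxl_release_reserve_list().
 */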
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/*
	 * If only one object is on the release, it is the release itself;
	 * since these objects are pinned, no reservation was taken.
	 */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, &release->base,
				   DMA_RESV_USAGE_READ);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	ww_acquire_fini(&release->ticket);
}
480