/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016-2018 Intel Corporation
 */

#include "gt/intel_gt_types.h"

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "gt/intel_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))

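/*
 * Shared HWSP (hardware status page) bookkeeping: a single page is carved
 * into 64-byte cachelines, one per timeline, with free_bitmap tracking
 * which of the 64 cachelines are still available for allocation.
 */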
struct intel_timeline_hwsp {
	struct intel_gt *gt;
	struct intel_gt_timelines *gt_timelines;
	struct list_head free_link;
	struct i915_vma *vma;
	u64 free_bitmap;
};

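/*
 * A single cacheline within a HWSP page. The low CACHELINE_BITS of vaddr
 * pack the cacheline index alongside the kmap pointer, and the
 * CACHELINE_FREE bit marks a cacheline whose timeline has already
 * released it (it is then freed once the last request retires).
 */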
struct intel_timeline_cacheline {
	struct i915_active active;
	struct intel_timeline_hwsp *hwsp;
	void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};

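/* Allocate a fresh page of internal memory and map it into the GGTT. */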
static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

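/*
 * Grab a free cacheline from a shared HWSP page, allocating a new page if
 * none have space left. On success, returns the page's vma and writes the
 * claimed cacheline index to *cacheline.
 */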
static struct i915_vma *
hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
	struct intel_gt_timelines *gt = &timeline->gt->timelines;
	struct intel_timeline_hwsp *hwsp;

	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock_irq(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSP that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		spin_unlock_irq(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(timeline->gt);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		vma->private = hwsp;
		hwsp->gt = timeline->gt;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;
		hwsp->gt_timelines = gt;

		spin_lock_irq(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		list_del(&hwsp->free_link);

	spin_unlock_irq(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}

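/*
 * Return a cacheline to its page's free bitmap. If this makes the page
 * non-full, republish it on the freelist; if it leaves the page completely
 * unused, release the page back to the system.
 */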
static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
	struct intel_gt_timelines *gt = hwsp->gt_timelines;
	unsigned long flags;

	spin_lock_irqsave(&gt->hwsp_lock, flags);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}

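/* Called once the cacheline is both released and idle: undo cacheline_alloc(). */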
static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));

	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	i915_vma_put(cl->hwsp->vma);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	i915_active_fini(&cl->active);
	kfree(cl);
}

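/*
 * i915_active callbacks: keep the HWSP vma pinned while any request still
 * references the cacheline, and on retirement free the cacheline if its
 * timeline has already marked it CACHELINE_FREE.
 */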
static void __cacheline_retire(struct i915_active *active)
{
	struct intel_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	i915_vma_unpin(cl->hwsp->vma);
	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
		__idle_cacheline_free(cl);
}

static int __cacheline_active(struct i915_active *active)
{
	struct intel_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	__i915_vma_pin(cl->hwsp->vma);
	return 0;
}

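/*
 * Wrap a claimed cacheline in a tracking struct: take a reference on the
 * page's vma, pin its kernel mapping, and pack the cacheline index into
 * the low bits of the vaddr.
 */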
static struct intel_timeline_cacheline *
cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
{
	struct intel_timeline_cacheline *cl;
	void *vaddr;

	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		kfree(cl);
		return ERR_CAST(vaddr);
	}

	i915_vma_get(hwsp->vma);
	cl->hwsp = hwsp;
	cl->vaddr = page_pack_bits(vaddr, cacheline);

	i915_active_init(hwsp->gt->i915, &cl->active,
			 __cacheline_active, __cacheline_retire);

	return cl;
}

static void cacheline_acquire(struct intel_timeline_cacheline *cl)
{
	if (cl)
		i915_active_acquire(&cl->active);
}

static void cacheline_release(struct intel_timeline_cacheline *cl)
{
	if (cl)
		i915_active_release(&cl->active);
}

static void cacheline_free(struct intel_timeline_cacheline *cl)
{
	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	if (i915_active_is_idle(&cl->active))
		__idle_cacheline_free(cl);
}

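/*
 * Initialise a timeline: with no vma supplied, carve a private cacheline
 * out of a shared HWSP page (which also enables the initial breadcrumb);
 * otherwise use the caller's global HWSP at the fixed
 * I915_GEM_HWS_SEQNO_ADDR offset.
 */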
int intel_timeline_init(struct intel_timeline *timeline,
			struct intel_gt *gt,
			struct i915_vma *hwsp)
{
	void *vaddr;

	kref_init(&timeline->kref);

	timeline->gt = gt;
	timeline->pin_count = 0;

	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct intel_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	mutex_init(&timeline->mutex);

	INIT_ACTIVE_REQUEST(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}

static void timelines_init(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	mutex_init(&timelines->mutex);
	INIT_LIST_HEAD(&timelines->active_list);

	spin_lock_init(&timelines->hwsp_lock);
	INIT_LIST_HEAD(&timelines->hwsp_free_list);
}

void intel_timelines_init(struct drm_i915_private *i915)
{
	timelines_init(&i915->gt);
}

static void timeline_add_to_active(struct intel_timeline *tl)
{
	struct intel_gt_timelines *gt = &tl->gt->timelines;

	mutex_lock(&gt->mutex);
	list_add(&tl->link, &gt->active_list);
	mutex_unlock(&gt->mutex);
}

static void timeline_remove_from_active(struct intel_timeline *tl)
{
	struct intel_gt_timelines *gt = &tl->gt->timelines;

	mutex_lock(&gt->mutex);
	list_del(&tl->link);
	mutex_unlock(&gt->mutex);
}

static void timelines_park(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *timeline;

	mutex_lock(&timelines->mutex);
	list_for_each_entry(timeline, &timelines->active_list, link) {
		/*
		 * All known fences are completed, so we can scrap the current
		 * sync point tracking and start afresh; any attempt to wait
		 * upon a previous sync point will be skipped as the fence was
		 * signaled.
		 */
		i915_syncmap_free(&timeline->sync);
	}
	mutex_unlock(&timelines->mutex);
}

/**
 * intel_timelines_park - called when the driver idles
 * @i915: the drm_i915_private device
 *
 * When the driver is completely idle, we know that all of our sync points
 * have been signaled and our tracking is then entirely redundant. Any request
 * to wait upon an older sync point will be completed instantly, as we know
 * the fence is signaled and therefore we will not even look it up in the
 * sync point map.
 */
void intel_timelines_park(struct drm_i915_private *i915)
{
	timelines_park(&i915->gt);
}

void intel_timeline_fini(struct intel_timeline *timeline)
{
	GEM_BUG_ON(timeline->pin_count);
	GEM_BUG_ON(!list_empty(&timeline->requests));

	i915_syncmap_free(&timeline->sync);

	if (timeline->hwsp_cacheline)
		cacheline_free(timeline->hwsp_cacheline);
	else
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
}

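/*
 * Illustrative usage, a sketch only (assuming the usual kref helpers such
 * as intel_timeline_put() declared alongside this file in intel_timeline.h):
 *
 *	tl = intel_timeline_create(gt, NULL);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *
 *	err = intel_timeline_pin(tl);
 *	... emit and retire requests against tl ...
 *	intel_timeline_unpin(tl);
 *
 *	intel_timeline_put(tl);
 */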
struct intel_timeline *
intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
{
	struct intel_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = intel_timeline_init(timeline, gt, global_hwsp);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	return timeline;
}

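/*
 * On first pin, pin the timeline's HWSP into the GGTT and convert
 * hwsp_offset from a page-relative offset into an absolute GGTT address
 * for use by in-flight requests.
 */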
int intel_timeline_pin(struct intel_timeline *tl)
{
	int err;

	if (tl->pin_count++)
		return 0;
	GEM_BUG_ON(!tl->pin_count);

	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto unpin;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	timeline_add_to_active(tl);

	return 0;

unpin:
	tl->pin_count = 0;
	return err;
}

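/*
 * A timeline with an initial breadcrumb consumes two seqno per request,
 * so advance by 2 and keep the completed seqno even (asserted below);
 * otherwise advance by 1.
 */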
static u32 timeline_advance(struct intel_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void timeline_rollback(struct intel_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

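/*
 * Slow path for seqno wraparound: swap in a brand new HWSP cacheline so
 * that HW semaphores still watching the old cacheline are not confused by
 * the seqno jumping backwards, keeping the old cacheline alive until the
 * current request retires.
 */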
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
			   struct i915_request *rq,
			   u32 *seqno)
{
	struct intel_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wraparound our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline.
	 *
	 * if (i915_active_request_is_signaled(&tl->last_request) &&
	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
	 *	return 0;
	 *
	 * That seems unlikely for a busy timeline that needed to wrap in
	 * the first place, so just replace the cacheline.
	 */

	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active,
			      tl->fence_context, rq);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}

int intel_timeline_get_seqno(struct intel_timeline *tl,
			     struct i915_request *rq,
			     u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->hwsp_cacheline))
		return __intel_timeline_get_seqno(tl, rq, seqno);

	return 0;
}

static int cacheline_ref(struct intel_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_ref(&cl->active, rq->fence.context, rq);
}

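/*
 * Look up the GGTT address of the seqno written by 'from' so that 'to'
 * can wait on it with a HW semaphore, taking an active reference on the
 * cacheline so it stays alive for the reader. Copes with 'from' having
 * wrapped onto a new cacheline in the meantime by recovering the original
 * offset. If 'from' has already completed, no reference is taken.
 */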
int intel_timeline_read_hwsp(struct i915_request *from,
			     struct i915_request *to,
			     u32 *hwsp)
{
	struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
	struct intel_timeline *tl = from->timeline;
	int err;

	GEM_BUG_ON(to->timeline == tl);

	mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
	err = i915_request_completed(from);
	if (!err)
		err = cacheline_ref(cl, to);
	if (!err) {
		if (likely(cl == tl->hwsp_cacheline)) {
			*hwsp = tl->hwsp_offset;
		} else { /* across a seqno wrap, recover the original offset */
			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
				CACHELINE_BYTES;
		}
	}
	mutex_unlock(&tl->mutex);

	return err;
}

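/* Drop a pin; teardown of the tracking only happens on the last unpin. */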
void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	if (--tl->pin_count)
		return;

	timeline_remove_from_active(tl);
	cacheline_release(tl->hwsp_cacheline);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);

	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __intel_timeline_free(struct kref *kref)
{
	struct intel_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	intel_timeline_fini(timeline);
	kfree(timeline);
}

static void timelines_fini(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	GEM_BUG_ON(!list_empty(&timelines->active_list));
	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));

	mutex_destroy(&timelines->mutex);
}

void intel_timelines_fini(struct drm_i915_private *i915)
{
	timelines_fini(&i915->gt);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif