/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
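
/*
 * Typical usage (an illustrative sketch only; real callers in this
 * driver emit fences from the command submission path rather than
 * open-coding this):
 *
 *	struct fence *f;
 *	int r = amdgpu_fence_emit(ring, &f);
 *	if (!r) {
 *		r = fence_wait(f, false);
 *		fence_put(f);
 *	}
 *
 * amdgpu_fence_emit() hands back a reference the caller must drop
 * with fence_put() once it no longer needs the fence.
 */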

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
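	/* amdgpu_fence_release() frees fences via call_rcu(), so wait for
	 * all pending RCU callbacks before destroying the slab they
	 * allocate from.
	 */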
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

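	/* The fences array is a power-of-two ring buffer of fence
	 * pointers indexed by the low bits of the 32-bit seqno, so a
	 * slot is only recycled once the hardware has moved
	 * num_fences_mask + 1 fences further along.
	 */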
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and signals all fences
 * up to the last signalled sequence number.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

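	/* Atomically fold the seqno just read back from memory into
	 * last_seq; retry if the irq handler or the fallback timer
	 * advanced last_seq under us in the meantime.
	 */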
	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		fence_put(fence);
	} while (last_seq != seq);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to an unsigned long
 *
 * Timer callback that checks for fence activity in case an
 * interrupt was missed.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct fence *fence, **ptr;
	int r;

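	/* sync_seq == 0 means nothing was ever emitted on this ring */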
	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
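	/* Compute sync_seq - last_seq in 64 bits: the low 32 bits of
	 * (2^32 - last_seq + sync_seq) give the right distance even
	 * when the 32-bit sequence number has wrapped.
	 */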
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 for success, error for failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
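	/* e.g. num_hw_submission == 256 yields a 512-entry fence table
	 * and num_fences_mask == 0x1ff
	 */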
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * FIXME:
		 * A zero timeout means an infinite wait, but a delayed
		 * workqueue cannot take MAX_SCHEDULE_TIMEOUT directly,
		 * so the scheduler will not use a delayed workqueue when
		 * MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}
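
/*
 * Bring-up order, roughly (a sketch; the exact call sites live in the
 * ring and IP-block init code): amdgpu_fence_driver_init_ring() runs
 * while the ring itself is initialized, before the writeback slot and
 * interrupt source exist; amdgpu_fence_driver_start_ring() is called
 * later with the ring's irq source to wire up the fence location and
 * mark the driver initialized.
 */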

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
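		/* drop the references still parked in the RCU slots */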
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* defer any needed GPU reset until resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

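		/* writing the last emitted seqno makes every outstanding
		 * fence on this ring read back as already signalled
		 */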
		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and arms the
 * fallback timer so the fence is signalled even if the interrupt
 * is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_puts(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}