/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
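
/*
 * The overall flow implemented below is roughly:
 *
 * 1. amdgpu_fence_emit() assigns the next sequence number and asks the
 *    ring to write it to the fence address once all prior commands in
 *    the pipeline have completed.
 * 2. The hardware writes the sequence number and raises an interrupt,
 *    with a timer as fallback in case the interrupt is lost.
 * 3. amdgpu_fence_process() reads the value back and signals every
 *    fence whose sequence number has been reached.
 */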

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	kmem_cache_destroy(amdgpu_fence_slab);
}
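
/*
 * Illustrative sketch only: these helpers are expected to be called
 * exactly once from the driver's module init/exit path (the call sites
 * are assumed here, they are not part of this file):
 *
 *	if (amdgpu_fence_slab_init())
 *		return -ENOMEM;
 *	...
 *	amdgpu_fence_slab_fini();
 */
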
/*
 * Cast helper
 */
static const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
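
/*
 * A minimal usage sketch (illustrative only; real callers live in the
 * command submission path and must hold the ring lock, since
 * amdgpu_fence_emit() cannot run concurrently on the same ring). The
 * reference returned in *f belongs to the caller and must be dropped
 * with fence_put():
 *
 *	struct fence *f;
 *
 *	if (!amdgpu_fence_emit(ring, &f)) {
 *		fence_wait(f, false);
 *		fence_put(f);
 *	}
 */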

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value, updates the last signaled
 * sequence number, and signals all fences emitted up to that point.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		fence_put(fence);
	} while (last_seq != seq);
}
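
/*
 * Worked example of the slot walk above, assuming num_fences_mask == 7
 * (an 8-entry fences array): if last_seq was 5 and the hardware has
 * written 9, the loop visits slots 6, 7, 0 and finally 9 & 7 = 1,
 * signaling and releasing the fence parked in each one.
 */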

/**
 * amdgpu_fence_fallback - fallback for a missed fence interrupt
 *
 * @arg: pointer to the ring structure, cast to unsigned long
 *
 * Timer callback that checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for the fences to be empty
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}
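
/*
 * Note on the RCU lookup above: fence_get_rcu() can fail if the
 * reference count already dropped to zero on another CPU. Either an
 * empty slot or a failed fence_get_rcu() means the last emitted fence
 * has been signaled and released, so there is nothing left to wait for.
 */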

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
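
/*
 * The 0x100000000ull bias keeps the 64-bit arithmetic positive even
 * when the 32-bit sequence counter has wrapped. For example, with
 * last_seq == 0xfffffffe and sync_seq == 0x00000002 the result is
 * 2^32 - 0xfffffffe + 0x2 = 4 outstanding fences; without a wrap
 * (say last_seq == 10, sync_seq == 15) the bias disappears when the
 * lower 32 bits are taken, leaving 5.
 */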

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
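
/*
 * Both branches above pick a single 32-bit slot for the fence value:
 * most rings use an entry in the device's writeback area (wb.wb[] is
 * an array of u32, hence the "* 4" when deriving the GPU address),
 * while the UVD ring places the value just past the aligned end of
 * the UVD firmware image, presumably because UVD works on its own
 * memory rather than the shared writeback area.
 */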

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * FIXME:
		 * The delayed workqueue cannot take MAX_SCHEDULE_TIMEOUT
		 * directly, so the scheduler does not use a delayed
		 * workqueue when MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   num_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}
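
/*
 * Sizing note: the fences array holds num_hw_submission * 2 slots, so
 * with num_hw_submission == 32 the mask is 0x3f and a fence's slot is
 * simply seq & 0x3f. The power-of-two check above is what makes this
 * mask arithmetic valid, and doubling the array makes it unlikely that
 * a just-emitted fence lands on a slot whose previous occupant is
 * still in flight (amdgpu_fence_emit() has to wait in that case).
 */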

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so this only re-enables the
 * fence interrupt on the rings with an initialized fence driver.
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}
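
/*
 * Forcing completion works by writing the last emitted sequence number
 * (sync_seq) straight to the fence address, so the next
 * amdgpu_fence_process() run reads it back and signals every
 * outstanding fence as if the hardware had finished all of its work.
 */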

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signaling on a fence
 * @f: fence
 *
 * This function is called with the fence lock held. It arms the
 * fallback timer if it is not already pending, which guarantees that
 * the fence will eventually be found signaled even if the fence
 * interrupt is lost.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
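
/*
 * Returning true tells the fence core that signaling was successfully
 * enabled; a return of false would indicate the fence had already
 * signaled when the callback ran. The actual signaling happens in
 * amdgpu_fence_process(), driven by the fence interrupt or by the
 * fallback timer armed above.
 */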

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct fence *f = container_of(rcu, struct fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that frees the fence
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
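
/*
 * The RCU deferral matters because waiters such as
 * amdgpu_fence_wait_empty() look fences up in the fences[] array under
 * rcu_read_lock() and can race with the final fence_put(); delaying
 * kmem_cache_free() until after a grace period keeps those readers
 * from touching freed memory.
 */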

static const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_puts(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}