/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
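
/*
 * Example: the typical fence lifecycle, as a hedged sketch (not part of
 * the driver). A caller that already holds the ring emission mutex, as
 * amdgpu_fence_emit() below requires, emits a fence after its ring
 * commands and can later block on it through the common fence API;
 * "ring" and "owner" stand for whatever the caller uses:
 *
 *      struct amdgpu_fence *fence;
 *      long r;
 *
 *      if (amdgpu_fence_emit(ring, owner, &fence))
 *              return -ENOMEM;
 *      // ... release the ring, do other work ...
 *      r = fence_wait(&fence->base, true);     // interruptible wait
 *      amdgpu_fence_unref(&fence);
 */
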

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = lower_32_bits(atomic64_read(&drv->last_seq));

        return seq;
}

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
        /*
         * Do not reset the timer here with mod_delayed_work,
         * this can livelock in an interaction with TTM delayed destroy.
         */
        queue_delayed_work(system_power_efficient_wq,
                           &ring->fence_drv.lockup_work,
                           AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;

        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
        if ((*fence) == NULL)
                return -ENOMEM;

        (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
        (*fence)->ring = ring;
        (*fence)->owner = owner;
        fence_init(&(*fence)->base, &amdgpu_fence_ops,
                   &ring->fence_drv.fence_queue.lock,
                   adev->fence_context + ring->idx,
                   (*fence)->seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               (*fence)->seq,
                               AMDGPU_FENCE_FLAG_INT);
        trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
        return 0;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
        struct amdgpu_fence *fence;
        struct amdgpu_device *adev;
        u64 seq;
        int ret;

        fence = container_of(wait, struct amdgpu_fence, fence_wake);
        adev = fence->ring->adev;

        /*
         * We cannot use amdgpu_fence_process here because we're already
         * in the waitqueue, in a call from wake_up_all.
         */
        seq = atomic64_read(&fence->ring->fence_drv.last_seq);
        if (seq >= fence->seq) {
                ret = fence_signal_locked(&fence->base);
                if (!ret)
                        FENCE_TRACE(&fence->base, "signaled from irq context\n");
                else
                        FENCE_TRACE(&fence->base, "was already signaled\n");

                __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
                fence_put(&fence->base);
        } else
                FENCE_TRACE(&fence->base, "pending\n");
        return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that other
         * process needs to update last_seq between the atomic read and
         * the xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop there need to be
         * continuously new fences signaled, i.e. amdgpu_fence_read needs
         * to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between the atomic read and xchg of the current process. And the
         * value the other process sets as last seq must be higher than
         * the seq value we just read. Which means the current process
         * needs to be interrupted after amdgpu_fence_read and before
         * the atomic xchg.
         *
         * To be even more safe we count the number of times we loop and
         * bail out after 10 loops, just accepting the fact that we might
         * have temporarily set last_seq not to the true last signaled
         * seq but to an older one.
         */
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq[ring->idx];
                seq = amdgpu_fence_read(ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted)
                        break;

                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped over too many times; leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (seq < last_emitted)
                amdgpu_fence_schedule_check(ring);

        return wake;
}
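
/*
 * Worked example of the 32->64 bit reconstruction above (illustrative
 * values only). The hardware fence slot holds only the low 32 bits; the
 * upper bits are borrowed from the 64-bit bookkeeping:
 *
 *      last_seq     = 0x00000001fffffffe       // 64-bit, from last_seq
 *      hw           = 0x00000003               // 32-bit, amdgpu_fence_read()
 *      seq          = hw | (last_seq & 0xffffffff00000000ULL)
 *                   = 0x0000000100000003       // < last_seq: low word wrapped
 *      seq          = (seq & 0xffffffff) |
 *                     (last_emitted & 0xffffffff00000000ULL)
 *                   = 0x0000000200000003       // if last_emitted = 0x2xxxxxxxx
 */
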
/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
        struct amdgpu_fence_driver *fence_drv;
        struct amdgpu_ring *ring;

        fence_drv = container_of(work, struct amdgpu_fence_driver,
                                 lockup_work.work);
        ring = fence_drv->ring;

        if (!down_read_trylock(&ring->adev->exclusive_lock)) {
                /* just reschedule the check if a reset is going on */
                amdgpu_fence_schedule_check(ring);
                return;
        }

        if (amdgpu_fence_activity(ring)) {
                wake_up_all(&ring->fence_drv.fence_queue);
        } else if (amdgpu_ring_is_lockup(ring)) {
                /* good news we believe it's a lockup */
                dev_warn(ring->adev->dev, "GPU lockup (current fence id "
                         "0x%016llx last fence id 0x%016llx on ring %d)\n",
                         (uint64_t)atomic64_read(&fence_drv->last_seq),
                         fence_drv->sync_seq[ring->idx], ring->idx);

                /* remember that we need a reset */
                ring->adev->needs_reset = true;
                wake_up_all(&ring->fence_drv.fence_queue);
        }
        up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        if (amdgpu_fence_activity(ring))
                wake_up_all(&ring->fence_drv.fence_queue);
}
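
/*
 * amdgpu_fence_process() is the entry point for the per-ring fence
 * interrupt handlers. A minimal sketch of such a handler (hypothetical
 * function name and ring choice, for illustration only; real handlers
 * live in the ASIC-specific files):
 *
 *      static int example_fence_irq(struct amdgpu_device *adev,
 *                                   struct amdgpu_irq_src *source,
 *                                   struct amdgpu_iv_entry *entry)
 *      {
 *              amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
 *              return 0;
 *      }
 */
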
/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        /* poll new last sequence at least once */
        amdgpu_fence_process(ring);
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;
        struct amdgpu_device *adev = ring->adev;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        if (down_read_trylock(&adev->exclusive_lock)) {
                amdgpu_fence_process(ring);
                up_read(&adev->exclusive_lock);

                if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                        return true;
        }
        return false;
}
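
/*
 * The helpers above back the common fence API: fence_is_signaled() ends
 * up in amdgpu_fence_is_signaled() through amdgpu_fence_ops (see the end
 * of this file). A hedged sketch of non-blocking polling by a caller:
 *
 *      if (fence_is_signaled(&fence->base)) {
 *              // buffers protected by this fence are idle on this ring
 *      }
 */
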
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return false;

        fence->fence_wake.flags = 0;
        fence->fence_wake.private = NULL;
        fence->fence_wake.func = amdgpu_fence_check_signaled;
        __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
        fence_get(f);
        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
        return true;
}

/*
 * amdgpu_fence_ring_wait_seq_timeout - wait for a seq of a specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 * @intr: if interruptible
 * @timeout: jiffies before timing out
 *
 * return value:
 * 0: timed out but seq not signaled, and gpu not hung
 * X (X > 0): seq signaled and X means how many jiffies remain before timeout
 * -EDEADLK: GPU hung before timeout
 * -ERESTARTSYS: interrupted before seq signaled
 * -EINVAL: some parameter is not valid
 */
static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_t seq,
                                               bool intr, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        long r = 0;
        bool signaled = false;

        BUG_ON(!ring);
        if (seq > ring->fence_drv.sync_seq[ring->idx])
                return -EINVAL;

        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return timeout;

        while (1) {
                if (intr) {
                        r = wait_event_interruptible_timeout(ring->fence_drv.fence_queue, (
                                        (signaled = amdgpu_fence_seq_signaled(ring, seq))
                                        || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);

                        if (r == -ERESTARTSYS) /* interrupted */
                                return r;
                } else {
                        r = wait_event_timeout(ring->fence_drv.fence_queue, (
                                        (signaled = amdgpu_fence_seq_signaled(ring, seq))
                                        || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
                }

                if (signaled) {
                        /* seq signaled */
                        if (timeout == MAX_SCHEDULE_TIMEOUT)
                                return timeout;
                        return (timeout - AMDGPU_FENCE_JIFFIES_TIMEOUT - r);
                } else if (adev->needs_reset) {
                        return -EDEADLK;
                }

                /* check if it's a lockup */
                if (amdgpu_ring_is_lockup(ring)) {
                        uint64_t last_seq = atomic64_read(&ring->fence_drv.last_seq);
                        /* ring lockup */
                        dev_warn(adev->dev, "GPU lockup (waiting for "
                                 "0x%016llx last fence id 0x%016llx on"
                                 " ring %d)\n",
                                 seq, last_seq, ring->idx);
                        wake_up_all(&ring->fence_drv.fence_queue);
                        return -EDEADLK;
                }

                if (timeout < MAX_SCHEDULE_TIMEOUT) {
                        timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
                        if (timeout < 1)
                                return 0;
                }
        }
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
        long r;

        uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
        if (seq >= ring->fence_drv.sync_seq[ring->idx])
                return -ENOENT;

        r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        long r;

        uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
        if (!seq)
                return 0;

        r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);

        if (r < 0) {
                if (r == -EDEADLK)
                        return -EDEADLK;

                dev_err(ring->adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
                        ring->idx, r);
        }
        return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
        fence_get(&fence->base);
        return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
        struct amdgpu_fence *tmp = *fence;

        *fence = NULL;
        if (tmp)
                fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = ring->fence_drv.sync_seq[ring->idx]
                - atomic64_read(&ring->fence_drv.last_seq);
        /* to avoid 32-bit wrap-around */
        if (emitted > 0x10000000)
                emitted = 0x10000000;

        return (unsigned)emitted;
}

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
                            struct amdgpu_ring *dst_ring)
{
        struct amdgpu_fence_driver *fdrv;

        if (!fence)
                return false;

        if (fence->ring == dst_ring)
                return false;

        /* we are protected by the ring mutex */
        fdrv = &dst_ring->fence_drv;
        if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
                return false;

        return true;
}
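
/*
 * amdgpu_fence_need_sync() is meant to be paired with
 * amdgpu_fence_note_sync() below: check first, make the destination ring
 * wait if needed, then record the new sync point. A hedged sketch (the
 * actual wait emission lives outside this file):
 *
 *      if (amdgpu_fence_need_sync(fence, dst_ring)) {
 *              // ... make dst_ring wait for fence, e.g. via a semaphore ...
 *              amdgpu_fence_note_sync(fence, dst_ring);
 *      }
 */
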
/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
                            struct amdgpu_ring *dst_ring)
{
        struct amdgpu_fence_driver *dst, *src;
        unsigned i;

        if (!fence)
                return;

        if (fence->ring == dst_ring)
                return;

        /* we are protected by the ring mutex */
        src = &fence->ring->fence_drv;
        dst = &dst_ring->fence_drv;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (i == dst_ring->idx)
                        continue;

                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring != &adev->uvd.ring) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                 "cpu addr 0x%p\n", ring->idx,
                 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}
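
/*
 * A hedged sketch of the expected bring-up order for one ring, modeled on
 * the ring-init paths (simplified, error handling omitted;
 * amdgpu_fence_driver_init_ring() is defined just below):
 *
 *      amdgpu_fence_driver_init_ring(ring);    // software state first
 *      r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 *                                              // then fence slot + interrupt
 */
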
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        int i;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ring->fence_drv.sync_seq[i] = 0;

        atomic64_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
                          amdgpu_fence_check_lockup);
        ring->fence_drv.ring = ring;

        if (amdgpu_enable_scheduler) {
                ring->scheduler = amd_sched_create((void *)ring->adev,
                                                   &amdgpu_sched_ops,
                                                   ring->idx, 5, 0,
                                                   amdgpu_sched_hw_submission);
                if (!ring->scheduler)
                        DRM_ERROR("Failed to create scheduler on ring %d.\n",
                                  ring->idx);
        }
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        int i, r;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(adev);
                }
                wake_up_all(&ring->fence_drv.fence_queue);
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                if (ring->scheduler)
                        amd_sched_destroy(ring->scheduler);
                ring->fence_drv.initialized = false;
        }
        mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(adev);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
        mutex_unlock(&adev->ring_lock);
}
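
/*
 * Suspend/resume pairing, as a hedged sketch: the device-level PM path is
 * expected to idle the rings before powering down and to re-arm the fence
 * interrupts once the hardware is back up:
 *
 *      amdgpu_fence_driver_suspend(adev);      // idle rings, disable irqs
 *      // ... power down, power up, re-init rings ...
 *      amdgpu_fence_driver_resume(adev);       // re-enable fence interrupts
 */
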
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        mutex_lock(&adev->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
        mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
        }
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
                           ring->fence_drv.sync_seq[i]);

                for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
                        struct amdgpu_ring *other = adev->rings[j];
                        if (i != j && other && other->fence_drv.initialized &&
                            ring->fence_drv.sync_seq[j])
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j,
                                           ring->fence_drv.sync_seq[j]);
                }
        }
        return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
        return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
{
        int idx;
        struct amdgpu_fence *fence;

        for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
                fence = fences[idx];
                if (fence) {
                        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                                return true;
                }
        }
        return false;
}

struct amdgpu_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
        struct amdgpu_wait_cb *wait =
                container_of(cb, struct amdgpu_wait_cb, base);
        wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
                                             signed long t)
{
        struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_device *adev = fence->ring->adev;

        memset(&array[0], 0, sizeof(array));
        array[0] = fence;

        return amdgpu_fence_wait_any(adev, array, intr, t);
}
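
/*
 * A hedged sketch of waiting on the first of several fences (one slot per
 * ring, unused slots NULL), which is how amdgpu_fence_default_wait() above
 * uses amdgpu_fence_wait_any() with a single populated entry:
 *
 *      struct amdgpu_fence *array[AMDGPU_MAX_RINGS] = { NULL };
 *      signed long t;
 *
 *      array[0] = fence_a;     // e.g. a fence from ring 0
 *      array[1] = fence_b;     // e.g. a fence from ring 1
 *      t = amdgpu_fence_wait_any(adev, array, false, MAX_SCHEDULE_TIMEOUT);
 */
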
/* wait until any fence in the array is signaled */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
                                  struct amdgpu_fence **array, bool intr, signed long t)
{
        long idx = 0;
        struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
        struct amdgpu_fence *fence;

        BUG_ON(!array);

        for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
                fence = array[idx];
                if (fence) {
                        cb[idx].task = current;
                        if (fence_add_callback(&fence->base,
                                               &cb[idx].base, amdgpu_fence_wait_cb))
                                return t; /* return if fence is already signaled */
                }
        }

        while (t > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                /*
                 * amdgpu_test_signaled_any must be called after
                 * set_current_state to prevent a race with wake_up_process
                 */
                if (amdgpu_test_signaled_any(array))
                        break;

                if (adev->needs_reset) {
                        t = -EDEADLK;
                        break;
                }

                t = schedule_timeout(t);

                if (t > 0 && intr && signal_pending(current))
                        t = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

        for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
                fence = array[idx];
                if (fence)
                        fence_remove_callback(&fence->base, &cb[idx].base);
        }

        return t;
}

const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .signaled = amdgpu_fence_is_signaled,
        .wait = amdgpu_fence_default_wait,
        .release = NULL,
};