/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
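/*
 * Summary of the fence life cycle as implemented in this file:
 * amdgpu_fence_emit() allocates a fence carrying the next per-ring
 * sequence number and asks the ring to write that number to the
 * fence slot once the preceding commands have executed.  The fence
 * interrupt (or the periodic lockup check) then calls
 * amdgpu_fence_process(), which reads the slot, advances last_seq
 * and wakes fence_queue so that waiters can re-test their sequence
 * numbers.
 */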
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		   &ring->fence_drv.fence_queue.lock,
		   adev->fence_context + ring->idx,
		   (*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value.  Returns true if activity occurred
 * on the ring, in which case the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen.  For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to loop forever there would need to be
	 * continuously new fences signaled, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg's last_seq
	 * between the atomic read and xchg of the current process.  And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process needs to be interrupted after amdgpu_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
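	/*
	 * Worked example of the 32->64 bit extension below (illustrative
	 * numbers, not from the original code): the hardware slot only
	 * holds the low 32 bits.  If last_seq is 0x00000001fffffff0 and
	 * the slot reads 0x00000010, then OR-ing in the upper bits of
	 * last_seq gives 0x0000000100000010, which is *less* than
	 * last_seq, so the counter must have wrapped; the upper bits are
	 * taken from last_emitted instead, yielding 0x0000000200000010.
	 */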
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}

/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				 lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);

	up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= the requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_ring_wait_seq().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @fence: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

/*
 * amdgpu_fence_ring_wait_seq - wait for a seq on a specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * return value:
 * 0: seq signaled, and gpu not hung
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	bool signaled = false;

	BUG_ON(!ring);
	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));

	if (signaled)
		return 0;
	else
		return -EDEADLK;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
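/*
 * Note on the sync_seq bookkeeping used above (interpretation inferred
 * from the code): sync_seq[ring->idx] is the last sequence number
 * emitted on the ring itself, while dst->sync_seq[i] records, for each
 * other ring i, the highest fence value of ring i that dst_ring is
 * already known to be synced against.  amdgpu_fence_need_sync() reads
 * this matrix and amdgpu_fence_note_sync() updates it.
 */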
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i, r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			  amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	init_waitqueue_head(&ring->fence_drv.fence_queue);

	if (amdgpu_enable_scheduler) {
		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * A delayed workqueue cannot use MAX_SCHEDULE_TIMEOUT
			 * directly, so the scheduler will not use a delayed
			 * workqueue when it is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   amdgpu_sched_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
{
	int idx;
	struct fence *fence;

	for (idx = 0; idx < count; ++idx) {
		fence = fences[idx];
		if (fence) {
			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
				return true;
		}
	}
	return false;
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;

	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
}
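/*
 * The wait below follows the usual kernel pattern for waiting on
 * multiple wake-up sources: register a callback on every fence that
 * wakes this task, then loop setting the task state *before* testing
 * the condition, so that a wake-up landing between the test and
 * schedule_timeout() is not lost.
 */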
/**
 * amdgpu_fence_wait_any - wait on a fence array with a timeout
 *
 * @adev: amdgpu device
 * @array: the fence array with amdgpu fence pointers
 * @count: the number of fences in the array
 * @intr: whether the task may be interrupted while sleeping
 * @t: timeout to wait
 *
 * Returns when any fence is signaled or the timeout expires.
 */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				  struct fence **array, uint32_t count,
				  bool intr, signed long t)
{
	struct amdgpu_wait_cb *cb;
	struct fence *fence;
	unsigned idx;

	BUG_ON(!array);

	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		t = -ENOMEM;
		goto err_free_cb;
	}

	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence) {
			cb[idx].task = current;
			if (fence_add_callback(fence,
					&cb[idx].base, amdgpu_fence_wait_cb)) {
				/* The fence is already signaled */
				goto fence_rm_cb;
			}
		}
	}

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled_any must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled_any(array, count))
			break;

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	for (idx = 0; idx < count; ++idx) {
		fence = array[idx];
		if (fence && cb[idx].base.func)
			fence_remove_callback(fence, &cb[idx].base);
	}

err_free_cb:
	kfree(cb);

	return t;
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
	.release = NULL,
};
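/*
 * Illustrative call sequence for the API above (a sketch only; error
 * handling and locking omitted, variable names hypothetical):
 *
 *	amdgpu_fence_driver_init_ring(ring);
 *	amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 *
 *	struct amdgpu_fence *fence;
 *	amdgpu_fence_emit(ring, owner, &fence);
 *	fence_wait(&fence->base, false);	// generic fence API
 *	amdgpu_fence_unref(&fence);
 */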