/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
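 * Only the lower 32 bits of the sequence number reach memory, stored
 * little-endian, and only if a CPU address for the fence location has
 * been set up.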
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
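 * The fence object is initialized before the fence command is emitted,
 * and the sequence number comes from the per-ring sync_seq counter, so
 * callers must hold the ring's emission mutex.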
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		&adev->fence_queue.lock, adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
				fence->ring->fence_drv.irq_type);
		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, meaning the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * need to be continuously signaled, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg's last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last seq must be higher
	 * than the seq value we just read, which means that the current
	 * process needs to be interrupted after amdgpu_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
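	 *
	 * As an illustration of the 32->64 bit reconstruction below
	 * (example values, not from the code): with
	 * last_seq = 0x00000001fffffffe and last_emitted = 0x0000000200000005,
	 * a hardware read of 0x00000003 first gives seq = 0x0000000100000003;
	 * that is below last_seq, so the upper 32 bits are taken from
	 * last_emitted instead, yielding the correct 0x0000000200000003.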
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}

/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes
 * the hardware to check whether a lockup occurred.
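 * Also re-arms any fence interrupt whose enabling had to be delayed,
 * and simply reschedules itself while a GPU reset holds the
 * exclusive lock.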
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
		fence_drv->delayed_irq = false;
		amdgpu_irq_update(ring->adev, fence_drv->irq_src,
				fence_drv->irq_type);
	}

	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->adev->fence_queue);
	else if (amdgpu_ring_is_lockup(ring)) {
		/* good news we believe it's a lockup */
		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring->idx], ring->idx);

		/* remember that we need a reset */
		ring->adev->needs_reset = true;
		wake_up_all(&ring->adev->fence_queue);
	}
	up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * need to be continuously signaled, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg's last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last seq must be higher
	 * than the seq value we just read, which means that the current
	 * process needs to be interrupted after amdgpu_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (wake)
		wake_up_all(&ring->adev->fence_queue);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
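 * Polls the hardware fence value once via amdgpu_fence_process()
 * before giving up, so a missed interrupt does not make an already
 * signaled fence look unsignaled.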
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			ring->fence_drv.irq_type);
		if (amdgpu_fence_activity(ring))
			wake_up_all_locked(&adev->fence_queue);

		/* did fence get signaled after we enabled the sw irq?
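		 * If so, drop the interrupt reference again and report the
		 * fence as already signaled rather than arming the
		 * waitqueue callback.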
		 */
		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				ring->fence_drv.irq_type);
			up_read(&adev->exclusive_lock);
			return false;
		}

		up_read(&adev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
			ring->fence_drv.irq_type))
			ring->fence_drv.delayed_irq = true;
		amdgpu_fence_schedule_check(ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

/**
 * amdgpu_fence_signaled - check if a fence has signaled
 *
 * @fence: amdgpu fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
{
	if (!fence)
		return true;

	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
		if (!fence_signal(&fence->base))
			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
		return true;
	}

	return false;
}

/**
 * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @adev: amdgpu device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for amdgpu_fence_wait_seq.
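 * The @seq array is indexed by ring id; rings with a zero entry, or no
 * ring at all, are skipped.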
 */
static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!adev->rings[i] || !seq[i])
			continue;

		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
			return true;
	}

	return false;
}

/**
 * amdgpu_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @adev: amdgpu device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number. Helper function
 * for amdgpu_fence_wait_*().
 * Returns remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
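 * The wait itself is split into AMDGPU_FENCE_JIFFIES_TIMEOUT slices so
 * that progress can be checked and a lockup detected between wakeups.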
 */
long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
				   bool intr, long timeout)
{
	uint64_t last_seq[AMDGPU_MAX_RINGS];
	bool signaled;
	int i;
	long r;

	if (timeout == 0) {
		return amdgpu_fence_any_seq_signaled(adev, target_seq);
	}

	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(adev->fence_queue, (
				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(adev->fence_queue, (
				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !target_seq[i])
				continue;

			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {

			if (adev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
				struct amdgpu_ring *ring = adev->rings[i];

				if (!ring || !target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
					break;
			}

			if (i != AMDGPU_MAX_RINGS)
				continue;

			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
				if (!adev->rings[i] || !target_seq[i])
					continue;

				if (amdgpu_ring_is_lockup(adev->rings[i]))
					break;
			}

			if (i < AMDGPU_MAX_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(adev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				adev->needs_reset = true;
				wake_up_all(&adev->fence_queue);
				return -EDEADLK;
			}

			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
				if (timeout <= 0) {
					return 0;
				}
			}
		}
	}
	return timeout;
}

/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[fence->ring->idx] = fence->seq;
	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}

	r = fence_signal(&fence->base);
	if (!r)
		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return 0;
}

/**
 * amdgpu_fence_wait_any - wait for a fence to signal on any ring
 *
 * @adev: amdgpu device pointer
 * @fences: amdgpu fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics). The fence
 * array is indexed by ring id. @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
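 * Rings with no fence in @fences are skipped; if there is nothing to
 * wait on at all, -ENOENT is returned.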
 * Returns 0 if any fence has passed, error for all other cases.
 */
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr)
{
	uint64_t seq[AMDGPU_MAX_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait on for the next available fence
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for all the emitted fences
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
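 * Waits on the most recently emitted sequence number; once that fence
 * has signaled, every earlier fence on the ring has signaled too.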
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
	if (!seq[ring->idx])
		return 0;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring->idx, r);
	}
	return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32bit wrap around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
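 * After this call, amdgpu_fence_need_sync() returns false on @dst_ring
 * for fences from the source ring up to the sequence numbers recorded
 * here.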
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	ring->fence_drv.initialized = true;
	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	init_waitqueue_head(&adev->fence_queue);
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
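 * Waits for each ring to go idle first; if that fails, outstanding
 * fences are force-completed instead, since the device is being torn
 * down anyway.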
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&adev->fence_queue);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];

			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;
	struct amdgpu_wait_cb cb;

	cb.task = current;

	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
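		/* Exit conditions, all checked only after setting the task
		 * state: fence signaled, GPU reset needed, timeout expired,
		 * or a pending signal when sleeping interruptibly. */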
		if (amdgpu_test_signaled(fence))
			break;

		if (adev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	fence_remove_callback(f, &cb.base);

	return t;
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
	.release = NULL,
};