/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
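/*
 * Illustrative sketch (not part of this file): a typical caller emits a
 * fence for work it has queued on a ring and then waits for it from the
 * CPU.  This assumes the caller has already reserved ring space (e.g. via
 * amdgpu_ring_alloc()) and commits it afterwards; error handling is
 * abbreviated:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &fence, NULL, 0);
 *	if (r)
 *		return r;
 *	r = dma_fence_wait(fence, false);  // block until the GPU writes the seqno
 *	dma_fence_put(fence);              // drop the reference emit handed back
 */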

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
	ktime_t			start_timestamp;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
		      unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	if (job == NULL) {
		/* create a separate hw fence */
		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
		if (am_fence == NULL)
			return -ENOMEM;
		fence = &am_fence->base;
		am_fence->ring = ring;
	} else {
		/* use the job-embedded fence */
		fence = &job->hw_fence;
	}

	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		/* reinit seq for resubmitted jobs */
		fence->seqno = seq;
		/* To be in line with external fence creation and other drivers */
		dma_fence_get(fence);
	} else {
		if (job) {
			dma_fence_init(fence, &amdgpu_job_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
			/* Against remove in amdgpu_job_{free, free_cb} */
			dma_fence_get(fence);
		} else {
			dma_fence_init(fence, &amdgpu_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
		}
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	to_amdgpu_fence(fence)->start_timestamp = ktime_get();

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(fence));

	*f = fence;

	return 0;
}
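/*
 * Worked example (illustrative): the fences array acts as a power-of-two
 * ring of slots indexed by "seq & num_fences_mask".  With
 * num_hw_submission = 128 the array holds 256 entries and
 * num_fences_mask = 0xff, so sequence numbers 0x002, 0x102 and 0x202 all
 * map to slot 2; the dma_fence_wait() on the old slot occupant above is
 * what throttles emission until the previous user of a slot has signaled.
 */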

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
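/*
 * Usage sketch (illustrative, e.g. for early-init or SR-IOV paths where
 * fence interrupts are unavailable): emit with polling and busy-wait for
 * the returned sequence number via amdgpu_fence_wait_polling() below.
 * MAX_KIQ_REG_WAIT is used only as an example timeout value here:
 *
 *	uint32_t seq;
 *	int r;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
 *	if (r)
 *		return r;
 *	if (!amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT))
 *		return -ETIMEDOUT;
 */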

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if any fence was processed.
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);

	return true;
}
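/*
 * Concurrency note (illustrative): amdgpu_fence_process() may be entered
 * concurrently from the fence interrupt, the fallback timer and debugfs.
 * The cmpxchg loop above makes exactly one caller the owner of each seqno
 * range.  Example: last_seq is 8 and the GPU has written 10.  If two
 * threads both read (8, 10), only the first cmpxchg(..., 8, 10) succeeds;
 * the loser re-reads last_seq as 10, then sees seq == last_seq and bails
 * out through the unlikely() check, so each fence slot is signaled once.
 */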

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the requested sequence number has been written (all asics).
 * Returns the remaining time if the wait succeeded, 0 if it timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
		udelay(2);
		timeout -= 2;
	}
	return timeout > 0 ? timeout : 0;
}
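/*
 * Worked example for the signed comparison above (illustrative): sequence
 * numbers are 32-bit and may wrap.  With wait_seq = 0x00000002 and a
 * hardware value of 0xfffffffe, the unsigned difference is 0x00000004 and
 * (int32_t)0x00000004 > 0, so the loop keeps polling across the wrap,
 * whereas a plain "wait_seq > read" test would stop waiting immediately.
 */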

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
 * @ring: ring the fence is associated with
 *
 * Find the earliest fence that is still unsignaled and calculate the time
 * delta between when it was emitted and now.
 */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;
	uint32_t last_seq, sync_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
	if (last_seq == sync_seq)
		return 0;

	++last_seq;
	last_seq &= drv->num_fences_mask;
	fence = drv->fences[last_seq];
	if (!fence)
		return 0;

	return ktime_us_delta(ktime_get(),
			      to_amdgpu_fence(fence)->start_timestamp);
}
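/*
 * Worked example for amdgpu_fence_count_emitted() (illustrative): with
 * last_seq = 0xfffffffe and sync_seq = 0x00000001, the computation is
 * 0x100000000 - 0xfffffffe + 0x00000001 = 0x3, i.e. three fences
 * (0xffffffff, 0x0 and 0x1) are outstanding even though sync_seq has
 * numerically wrapped below last_seq.
 */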

/**
 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
 * @ring: ring the fence is associated with
 * @seq: the fence seq number to update.
 * @timestamp: the start timestamp to update.
 *
 * This function is called when the fence and its related ib are about to
 * be resubmitted to the GPU in the MCBP scenario. Thus we do not need to
 * consider races with amdgpu_fence_process modifying the same fence.
 */
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;

	seq &= drv->num_fences_mask;
	fence = drv->fences[seq];
	if (!fence)
		return;

	to_amdgpu_fence(fence)->start_timestamp = timestamp;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned int irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}
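/*
 * Typical usage (sketch, assuming the usual ring bring-up path): the
 * _init_ring() helper below runs when the ring object is created, while
 * amdgpu_fence_driver_start_ring() is called once the interrupt source
 * for the ring exists, e.g. from amdgpu_ring_init():
 *
 *	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 *	if (r)
 *		return r;
 */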

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		else
			r = -ENODEV;
		/* no need to trigger GPU reset as we are unloading */
		if (r)
			amdgpu_fence_driver_force_completion(ring);

		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		del_timer_sync(&ring->fence_drv.fallback_timer);
	}
}

/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			continue;

		if (stop)
			disable_irq(adev->irq.irq);
		else
			enable_irq(adev->irq.irq);
	}
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/*
		 * Notice we check for sched.ops since there's some
		 * override on the meaning of sched.ready by amdgpu.
		 * The natural check would be sched.ready, which is
		 * set as drm_sched_init() finishes...
		 */
		if (ring->sched.ops)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}
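/*
 * Lifecycle sketch (illustrative): the sw_*/hw_* entry points above pair
 * up across device init/teardown roughly as:
 *
 *	amdgpu_fence_driver_sw_init(adev);	// alloc-time setup
 *	amdgpu_fence_driver_hw_init(adev);	// enable fence interrupts
 *	...
 *	amdgpu_fence_driver_hw_fini(adev);	// quiesce + disable interrupts
 *	amdgpu_fence_driver_sw_fini(adev);	// free fence arrays
 *
 * The hw_* variants are kept separate because suspend/resume only needs
 * to toggle the hardware side without freeing the software state.
 */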

/**
 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
 *
 * @ring: ring whose job-embedded fences will be cleared
 *
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	int i;
	struct dma_fence *old, **ptr;

	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops) {
			struct amdgpu_job *job;

			/* For a non-scheduler bad job, i.e. a failed ib test, we need to
			 * signal it right here or we won't be able to track it in fence_drv
			 * and it will remain unsignaled during sa_bo free.
			 */
			job = container_of(old, struct amdgpu_job, hw_fence);
			if (!job->base.s_fence && !dma_fence_is_signaled(old))
				dma_fence_signal(old);
			RCU_INIT_POINTER(*ptr, NULL);
			dma_fence_put(old);
		}
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be signaled
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held when the first
 * callback is attached. It arms the interrupt fallback timer so the
 * fence is eventually signaled even if the fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

	return true;
}
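/*
 * Illustrative note: ->enable_signaling is invoked by the dma-fence core
 * the first time someone waits asynchronously, e.g.:
 *
 *	static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb) { ... }
 *
 *	if (dma_fence_add_callback(fence, &cb, my_cb) == -ENOENT)
 *		;	// fence already signaled, callback not installed
 *
 * For amdgpu it only needs to arm the fallback timer, since the fence
 * interrupt itself is requested at emit time (AMDGPU_FENCE_FLAG_INT).
 */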

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 * @f: fence
 *
 * This is similar to amdgpu_fence_enable_signaling above; it
 * only handles the job embedded fence.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the slab allocation for a separate (non-job) fence */
	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the job the fence is embedded in */
	kfree(container_of(f, struct amdgpu_job, hw_fence));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
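/*
 * Why call_rcu() (illustrative): lookups such as the one in
 * amdgpu_fence_wait_empty() touch the fence under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	fence = rcu_dereference(*ptr);
 *	if (!fence || !dma_fence_get_rcu(fence)) { ... }
 *	rcu_read_unlock();
 *
 * Deferring the kmem_cache_free()/kfree() by one grace period guarantees
 * the memory stays valid for a reader that raced with the final put.
 */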

/**
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 *
 * @f: fence
 *
 * This is similar to amdgpu_fence_release above; it
 * only handles the job embedded fence.
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
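/*
 * Usage note (illustrative): with debugfs mounted, the file created in
 * amdgpu_debugfs_fence_init() below can be read to dump the per-ring
 * seqno state printed above, e.g. (the minor number varies per device):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_fence_info
 */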

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int gpu_recover_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);

	*val = atomic_read(&adev->reset_domain->reset_res);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
			 "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);

	if (!amdgpu_sriov_vf(adev)) {
		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);
	}
#endif
}