/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};
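/*
 * Lifecycle sketch (derived from the code below): amdgpu_fence_emit()
 * allocates a fence from the slab, assigns it the next 32 bit sequence
 * number and asks the ring to write that number to the fence driver's
 * writeback location once the preceding commands have finished.
 * amdgpu_fence_process(), called from the fence interrupt or from the
 * fallback timer, then compares the written value against last_seq and
 * signals every fence in between.
 */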
static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
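/*
 * Note: the sequence number lives in a little-endian 32 bit word that
 * the GPU writes directly; the le32 conversions above keep the CPU view
 * consistent on big-endian hosts. Falling back to last_seq when no CPU
 * address is set (e.g. before amdgpu_fence_driver_start_ring() has run)
 * simply reports the last value we already processed.
 */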
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	*s = seq;

	return 0;
}
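/*
 * The fences[] array above holds num_hw_submission * 2 slots, so a slot
 * indexed by (seq & num_fences_mask) is normally recycled long after
 * its fence has signalled. If the old occupant is still unsignalled the
 * hardware queue has wrapped around completely and emit waits for it,
 * which is why the "rcu slot is busy" message should be rare.
 */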
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
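/*
 * Worked example for the loop above: with last_seq == 2 and a freshly
 * read seq == 5, the masked walk visits slots 3, 4 and 5, signalling
 * and dropping each stored fence. The cmpxchg loop beforehand makes
 * sure concurrent callers (irq handler, fallback timer, debugfs) agree
 * on who advances last_seq for a given range.
 */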
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for the fences
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring to wait on
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Wait for the requested sequence number to be written by the ring
 * (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
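/*
 * The (int32_t)(wait_seq - seq) > 0 test above is wraparound safe:
 * e.g. wait_seq == 0x00000002 and seq == 0xfffffffe gives a difference
 * of 4 after the signed truncation, so a sequence counter that has
 * recently wrapped is still treated as "not yet reached".
 */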
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
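/*
 * adev->wb.wb[] is an array of 32 bit writeback words, hence the
 * fence_offs * 4 above when converting the slot index into a byte
 * offset for the GPU address. Note the split of responsibilities:
 * amdgpu_fence_driver_init_ring() below sets up the software state at
 * driver init time, while start_ring() wires up the writeback location
 * and interrupt once the rings actually exist.
 */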
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * Delayed workqueue cannot use it directly,
			 * so the scheduler will not use delayed workqueue if
			 * MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
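/*
 * Example of the sizing above: with num_hw_submission == 256 the
 * fences[] array gets 512 entries and num_fences_mask becomes 0x1ff,
 * so (seq & num_fences_mask) can replace a modulo. This only works
 * because num_hw_submission was checked to be a power of two.
 */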
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
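/*
 * The teardown order above matters: drain the ring (or force completion
 * if the GPU hangs), detach the interrupt, stop the scheduler and the
 * fallback timer, and only then drop the remaining fence references so
 * nothing can re-arm the slots that are being freed.
 */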
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring to signal the latest fence on
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
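/*
 * force_completion works by writing sync_seq, the last *emitted*
 * sequence number, into the fence location and re-running fence
 * processing, so every outstanding fence on the ring is signalled as
 * if the hardware had completed it.
 */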
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and arms the
 * fallback timer if it is not already pending, so the fence gets
 * signalled even if the fence interrupt is lost.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}
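/*
 * Freeing must go through RCU because amdgpu_fence_wait_empty() looks
 * up fences[] slots under rcu_read_lock() and may still be holding a
 * pointer to a fence whose last reference was just dropped; the slab
 * is likewise only destroyed after the rcu_barrier() in
 * amdgpu_fence_slab_fini().
 */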
/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
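/*
 * The SR-IOV variant of the debugfs list below omits the gpu_reset
 * entry, presumably because a virtual function is not supposed to
 * trigger a full GPU reset on its own.
 */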
/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset when this debugfs file is read.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}