/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed
 * (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}
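/*
 * Illustrative note (editor addition, not part of the original file):
 * the fence packets above are not emitted by callers directly.
 * evergreen_dma_fence_ring_emit() runs as the ring's emit_fence
 * callback when radeon_fence_emit() is called with ring space already
 * reserved, mirroring the pattern used by evergreen_copy_dma() below:
 *
 *	r = radeon_ring_lock(rdev, ring, ndw);
 *	... queue work on the ring ...
 *	r = radeon_fence_emit(rdev, &fence, ring->idx);
 *	radeon_ring_unlock_commit(rdev, ring);
 */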
/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		/* Predict the rptr value once the IB packet below has been
		 * fetched: account for this 4 DW write packet, the NOP
		 * padding that aligns the 3 DW indirect buffer packet to
		 * end on an 8 DW boundary, and the IB packet itself.
		 */
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring.  Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int evergreen_copy_dma(struct radeon_device *rdev,
		       uint64_t src_offset, uint64_t dst_offset,
		       unsigned num_gpu_pages,
		       struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFFF);
	/* one 5 DW copy packet per loop, plus room for the semaphore
	 * sync and the 8 DW fence emitted above
	 */
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
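/*
 * Usage sketch (editor addition, not part of the original file):
 * assuming src_gpu_addr and dst_gpu_addr hold valid GPU addresses and
 * 'fence' may optionally carry a fence to synchronize with, a caller
 * could copy one GPU page and wait for completion like this:
 *
 *	struct radeon_fence *fence = NULL;
 *	int r;
 *
 *	r = evergreen_copy_dma(rdev, src_gpu_addr, dst_gpu_addr, 1, &fence);
 *	if (!r) {
 *		r = radeon_fence_wait(fence, false);
 *		radeon_fence_unref(&fence);
 *	}
 */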