1 /* 2 * Copyright 2011 Christian König. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sub license, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * 21 * The above copyright notice and this permission notice (including the 22 * next paragraph) shall be included in all copies or substantial portions 23 * of the Software. 
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"

/**
 * radeon_semaphore_create - allocate a new semaphore object
 *
 * @rdev: radeon_device pointer
 * @semaphore: location to store the newly allocated semaphore
 *
 * Allocates a semaphore structure backed by a suballocated GPU buffer of
 * 8 * RADEON_NUM_SYNCS bytes (one 8-byte slot per possible sync), records
 * its GPU address and clears the per-ring sync_to[] fence slots.
 *
 * Returns 0 on success, -ENOMEM or the radeon_sa_bo_new() error code on
 * failure; on failure *semaphore is reset to NULL.
 */
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore)
{
	uint32_t *cpu_addr;
	int i, r;

	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
	if (*semaphore == NULL) {
		return -ENOMEM;
	}
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
			     8 * RADEON_NUM_SYNCS, 8);
	if (r) {
		/* backing buffer allocation failed, undo the kmalloc */
		kfree(*semaphore);
		*semaphore = NULL;
		return r;
	}
	(*semaphore)->waiters = 0;
	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);

	/* NOTE(review): this clears RADEON_NUM_SYNCS 32-bit words, i.e. only
	 * half of the 8 * RADEON_NUM_SYNCS bytes allocated above — confirm
	 * whether the upper halves of the 8-byte slots need clearing too.
	 */
	cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
		cpu_addr[i] = 0;

	/* no fences registered for syncing yet */
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		(*semaphore)->sync_to[i] = NULL;

	return 0;
}

/**
 * radeon_semaphore_emit_signal - emit a semaphore signal command
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring the signal is emitted on
 * @semaphore: semaphore object to signal
 *
 * Emits a signal command on ring @ridx. On success one pending wait is
 * balanced (waiters is decremented) and the semaphore GPU address is
 * recorded in the ring for the sysfs lockup-debug files.
 *
 * Returns true if the command was emitted, false otherwise.
 */
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
				  struct radeon_semaphore *semaphore)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	trace_radeon_semaphore_signale(ridx, semaphore);

	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
		--semaphore->waiters;

		/* for debugging lockup only, used by sysfs debug files */
		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
		return true;
	}
	return false;
}

/**
 * radeon_semaphore_emit_wait - emit a semaphore wait command
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring the wait is emitted on
 * @semaphore: semaphore object to wait on
 *
 * Emits a wait command on ring @ridx. On success the pending-wait count
 * (waiters) is incremented and the semaphore GPU address is recorded in
 * the ring for the sysfs lockup-debug files.
 *
 * Returns true if the command was emitted, false otherwise.
 */
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
				struct radeon_semaphore *semaphore)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	trace_radeon_semaphore_wait(ridx, semaphore);

	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
		++semaphore->waiters;

		/* for debugging lockup only, used by sysfs debug files */
		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
		return true;
	}
	return false;
}

/**
 * radeon_semaphore_sync_to - use the semaphore to sync to a fence
 *
101 * @semaphore: semaphore object to add fence to 102 * @fence: fence to sync to 103 * 104 * Sync to the fence using this semaphore object 105 */ 106 void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, 107 struct radeon_fence *fence) 108 { 109 struct radeon_fence *other; 110 111 if (!fence) 112 return; 113 114 other = semaphore->sync_to[fence->ring]; 115 semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other); 116 } 117 118 /** 119 * radeon_semaphore_sync_rings - sync ring to all registered fences 120 * 121 * @rdev: radeon_device pointer 122 * @semaphore: semaphore object to use for sync 123 * @ring: ring that needs sync 124 * 125 * Ensure that all registered fences are signaled before letting 126 * the ring continue. The caller must hold the ring lock. 127 */ 128 int radeon_semaphore_sync_rings(struct radeon_device *rdev, 129 struct radeon_semaphore *semaphore, 130 int ring) 131 { 132 unsigned count = 0; 133 int i, r; 134 135 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 136 struct radeon_fence *fence = semaphore->sync_to[i]; 137 138 /* check if we really need to sync */ 139 if (!radeon_fence_need_sync(fence, ring)) 140 continue; 141 142 /* prevent GPU deadlocks */ 143 if (!rdev->ring[i].ready) { 144 dev_err(rdev->dev, "Syncing to a disabled ring!"); 145 return -EINVAL; 146 } 147 148 if (++count > RADEON_NUM_SYNCS) { 149 /* not enough room, wait manually */ 150 r = radeon_fence_wait(fence, false); 151 if (r) 152 return r; 153 continue; 154 } 155 156 /* allocate enough space for sync command */ 157 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); 158 if (r) { 159 return r; 160 } 161 162 /* emit the signal semaphore */ 163 if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { 164 /* signaling wasn't successful wait manually */ 165 radeon_ring_undo(&rdev->ring[i]); 166 r = radeon_fence_wait(fence, false); 167 if (r) 168 return r; 169 continue; 170 } 171 172 /* we assume caller has already allocated space on waiters ring */ 173 if 
(!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { 174 /* waiting wasn't successful wait manually */ 175 radeon_ring_undo(&rdev->ring[i]); 176 r = radeon_fence_wait(fence, false); 177 if (r) 178 return r; 179 continue; 180 } 181 182 radeon_ring_commit(rdev, &rdev->ring[i]); 183 radeon_fence_note_sync(fence, ring); 184 185 semaphore->gpu_addr += 8; 186 } 187 188 return 0; 189 } 190 191 void radeon_semaphore_free(struct radeon_device *rdev, 192 struct radeon_semaphore **semaphore, 193 struct radeon_fence *fence) 194 { 195 if (semaphore == NULL || *semaphore == NULL) { 196 return; 197 } 198 if ((*semaphore)->waiters > 0) { 199 dev_err(rdev->dev, "semaphore %p has more waiters than signalers," 200 " hardware lockup imminent!\n", *semaphore); 201 } 202 radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence); 203 kfree(*semaphore); 204 *semaphore = NULL; 205 } 206