/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"

int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else
		radeon_fence_ring_emit(rdev, fence);

	fence->emited = true;
	list_del(&fence->list);
	list_add_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		seq = rdev->wb.wb[scratch_index/4];
	} else
		seq = RREG32(rdev->fence_drv.scratch_reg);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should test
				 * for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around, update last_jiffies; we will
			 * just wait a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}

static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemited fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}

int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and fence isn't
		 * signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news, we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}

int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, &radeon_fence_destroy);
	}
}

void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
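
/*
 * Illustrative sketch of how callers are expected to use this API: create a
 * fence, emit it after queuing GPU work, wait on it, then drop the reference.
 * The helper name radeon_example_sync_gpu() is hypothetical and only shows
 * the intended call sequence; it is not defined anywhere in the driver.
 *
 *	static int radeon_example_sync_gpu(struct radeon_device *rdev)
 *	{
 *		struct radeon_fence *fence;
 *		int r;
 *
 *		r = radeon_fence_create(rdev, &fence);
 *		if (r)
 *			return r;
 *		r = radeon_fence_emit(rdev, fence);
 *		if (!r)
 *			r = radeon_fence_wait(fence, false);
 *		radeon_fence_unref(&fence);
 *		return r;
 *	}
 */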