/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
{
	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv.scratch_reg, seq);
	}
}

static u32 radeon_fence_read(struct radeon_device *rdev)
{
	u32 seq;

	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
	} else {
		seq = RREG32(rdev->fence_drv.scratch_reg);
	}
	return seq;
}

int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready)
		/* FIXME: the CP is not running, assume everything is done
		 * right away
		 */
		radeon_fence_write(rdev, fence->seq);
	else
		radeon_fence_ring_emit(rdev, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emited = true;
	list_move_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

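/*
 * radeon_fence_poll_locked - scan for newly signaled fences
 *
 * Reads back the last sequence number written by the GPU and moves every
 * fence up to and including it from the emited list to the signaled list.
 * If the sequence number has not advanced, the remaining lockup timeout
 * is decremented instead, so that waiters can eventually test for a GPU
 * lockup. Caller must hold fence_drv.lock. Returns true when at least
 * one fence signaled and waiters should be woken.
 */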
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should
				 * test for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around; update last_jiffies, we
			 * will just wait a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}

static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemitted fence: %p!\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}

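/*
 * radeon_fence_wait - block until the fence signals or an error occurs
 *
 * Sleeps on fence_drv.queue with the software interrupt enabled so the
 * fence IRQ can wake us up. A wakeup before the timeout simply resumes
 * the wait with the remaining time; if the timeout expires while the
 * sequence number has not moved, the GPU is checked for a lockup and
 * reset if one is detected. Returns 0 once the fence signaled, or a
 * negative error code (-ERESTARTSYS if an interruptible wait was
 * interrupted by a signal).
 */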
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq:
		 * if we're experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do? mark everything as
			 * signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}

int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

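/*
 * radeon_fence_process - poll fences and wake up waiters
 *
 * Called when the fence IRQ fires: polls for newly signaled fences under
 * the driver lock and, if any fence signaled, wakes every process
 * sleeping in radeon_fence_wait().
 */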
void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	radeon_fence_write(rdev, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   radeon_fence_read(rdev));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
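
/*
 * Typical fence lifecycle, as a minimal sketch (error paths and the
 * actual ring programming are elided; illustrative, not driver code):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_create(rdev, &fence);
 *	if (r)
 *		return r;
 *	... emit commands on the ring ...
 *	radeon_fence_emit(rdev, fence);
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */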