// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	kmem_cache_free(global.slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}
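
/*
 * Usage sketch (illustrative only): the typical caller-side lifecycle
 * built on the functions above, assuming the usual pin/put helpers from
 * intel_context.h. The error label is hypothetical; the first pin
 * allocates the context state via intel_context_alloc_state().
 *
 *	ce = intel_context_create(engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		goto err_put;
 *
 *	... build and submit requests against ce ...
 *
 *	intel_context_unpin(ce);
 * err_put:
 *	intel_context_put(ce);
 */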

static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}

static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}

int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}

int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
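
/*
 * Usage sketch (illustrative only): a caller that already holds its own
 * i915_gem_ww_ctx can fold the context pin into the same acquire class
 * with intel_context_pin_ww(), following the backoff idiom of
 * __intel_context_do_pin() above. "my_obj" is a hypothetical object the
 * caller also needs locked.
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(my_obj, &ww);
 *	if (!err)
 *		err = intel_context_pin_ww(ce, &ww);
 *	if (!err) {
 *		... emit work touching both my_obj and ce ...
 *		intel_context_unpin(ce);
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */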

void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}

void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on its account; in other words,
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
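
/*
 * Usage sketch (illustrative only): a remote modification is emitted from
 * a request on another (typically kernel) context and serialised against
 * the context being changed, which must already be active/pinned.
 * emit_modification() is a hypothetical stand-in for whatever commands a
 * real caller would emit.
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	rq = intel_engine_create_kernel_request(ce->engine);
 *	if (IS_ERR(rq)) {
 *		err = PTR_ERR(rq);
 *		goto unpin;
 *	}
 *
 *	err = intel_context_prepare_remote_request(ce, rq);
 *	if (err == 0)
 *		err = emit_modification(rq, ce);
 *	i915_request_add(rq);
 * unpin:
 *	intel_context_unpin(ce);
 */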

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as the outer
	 * lock. Hack around this to shut up lockdep in selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif