// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	kmem_cache_free(global.slab_ce, ce);
}

void intel_context_free(struct intel_context *ce)
{
	call_rcu(&ce->rcu, rcu_context_free);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

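/*
 * The helpers below pin the backing storage for a context: its state
 * object and its ring. Each pairs a GGTT pin with an i915_active
 * reference on the vma; the context state is additionally marked
 * unshrinkable so that the shrinker leaves it alone until it is
 * unpinned again.
 */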
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}

static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}

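/*
 * Pinning a context makes its state resident and mapped for execution.
 * Under the caller's ww acquire context we first lock and pin the
 * timeline hwsp, ring and state objects, so that ce->pin_mutex is never
 * taken inside dma_resv_lock. Only the first pin (pin_count going
 * 0 -> 1) acquires the active reference and calls ops->pin(); the
 * transient pins taken up front are dropped again on the way out,
 * leaving the long-term references taken by __intel_context_active()
 * in place until the context is retired.
 */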
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}

int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we
	 * finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

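/*
 * __intel_context_active() and __intel_context_retire() are the
 * activation and retirement callbacks for ce->active: activation takes
 * the long-term pins on the ring, timeline and context state (their
 * objects were already activated in intel_context_pre_pin()), while
 * retirement drops them again via intel_context_post_unpin() before
 * releasing the self-reference taken on activation.
 */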
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}

void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	/* NB ce->signal_link/lock is used under RCU */
	spin_lock_init(&ce->signal_lock);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire, 0);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on account of it. Or, in other
	 * words, transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hack around this to shut up lockdep in selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif