// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_gt_regs.h"

static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
		intel_uncore_write(engine->gt->uncore,
				   RC_PSMI_CTRL_GSCCS,
				   _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
		/* hysteresis 0xA=5us as recommended in spec */
		intel_uncore_write(engine->gt->uncore,
				   PWRCTX_MAXCNT_GSCCS,
				   0xA);
	}
}

static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj, NULL))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

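/*
 * Fence callback attached to the parking request built in
 * switch_to_kernel_context(): once that request signals, fold the time
 * from emission to completion into the engine's latency estimate.
 */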
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * Open coded one half of intel_context_enter, which we have to omit
	 * here (see the large comment below) and because the other part must
	 * not be called due to constructing directly with __i915_request_create
	 * which increments active count via intel_context_mark_active.
	 */
	GEM_BUG_ON(rq->context->active_count != 1);
	__intel_gt_pm_get(engine->gt);

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	bool result = true;

	/*
	 * This is execlist specific behaviour intended to ensure the GPU is
	 * idle by switching to a known 'safe' context. With GuC submission, the
	 * same idle guarantee is achieved by other means (disabling
	 * scheduling). Further, switching to a 'safe' context has no effect
	 * with GuC submission as the scheduler can just switch back again.
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot,
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new GPU user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);
	return result;
}

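/*
 * Flush any idle barriers left on the engine: each node on barrier_tasks
 * is an embedded dma_fence_cb, and its callback is invoked here with
 * -EAGAIN as no barrier request will be emitted (e.g. after wedging).
 */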
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	if (engine->park)
		engine->park(engine);

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

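/*
 * Set up the engine wakeref: the first reference unparks the engine via
 * __engine_unpark() and the final put parks it again via __engine_park().
 * Heartbeat state and (for the GSC engine) idle messaging are also
 * initialised here.
 */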
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
	intel_engine_init_heartbeat(engine);

	intel_gsc_idle_msg_enable(engine);
}

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose or get their content
 * corrupted on suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	list_for_each_entry(ce, &engine->pinned_contexts_list,
			    pinned_contexts_link) {
		/* kernel context gets reset at __engine_unpark() */
		if (ce == engine->kernel_context)
			continue;

		dbg_poison_ce(ce);
		ce->ops->reset(ce);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif