/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	void *map;

	GEM_TRACE("%s\n", engine->name);

	intel_gt_pm_get(engine->i915);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_init_hangcheck(engine);
	return 0;
}

void intel_engine_pm_get(struct intel_engine_cs *engine)
{
	intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
}

void intel_engine_park(struct intel_engine_cs *engine)
{
	/*
	 * We are committed now to parking this engine, make sure there
	 * will be no more interrupts arriving later and the engine
	 * is truly idle.
	 */
	if (wait_for(intel_engine_is_idle(engine), 10)) {
		struct drm_printer p = drm_debug_printer(__func__);

		dev_err(engine->i915->drm.dev,
			"%s is not idle before parking\n",
			engine->name);
		intel_engine_dump(engine, &p, NULL);
	}
}

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (i915_reset_failed(engine->i915))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of __engine_park that prevents anyone else
	 * from creating a request on this engine. This also requires that
	 * the ring is empty and we avoid any waits while constructing the
	 * context, as they assume protection by the timeline->mutex. This
	 * should hold true as we can only park the engine after retiring
	 * the last request, thus all rings should be empty and all
	 * timelines idle.
	 */
	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		return true;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;

	i915_request_add_barriers(rq);
	__i915_request_commit(rq);

	return false;
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	GEM_TRACE("%s\n", engine->name);

	intel_engine_disarm_breadcrumbs(engine);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

	intel_gt_pm_put(engine->i915);
	return 0;
}
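
/**
 * intel_engine_pm_put - release a wakeref taken with intel_engine_pm_get()
 * @engine: the engine to mark as idle
 *
 * On the final put, __engine_park() runs: if the engine is not already
 * idling in the kernel context, a switch to the kernel context is queued
 * and the park is rejected (-EBUSY), keeping the engine awake until that
 * request retires and parking is retried.
 */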
void intel_engine_pm_put(struct intel_engine_cs *engine)
{
	intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
}

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	intel_wakeref_init(&engine->wakeref);
}
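
/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original file): callers are expected to bracket any access to engine
 * state with an engine-pm reference, so the engine is unparked before
 * use and allowed to park again afterwards:
 *
 *	intel_engine_pm_get(engine);
 *	... build and submit requests, touch engine registers ...
 *	intel_engine_pm_put(engine);
 *
 * The first get runs __engine_unpark() (taking the GT wakeref, pinning
 * the default state and re-arming hangcheck); the last put attempts
 * __engine_park(), which queues a switch to the kernel context and
 * defers the actual power-down until the engine idles in that context.
 */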