/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	void *map;

	GEM_TRACE("%s\n", engine->name);

	intel_gt_pm_get(engine->gt);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_init_hangcheck(engine);
	return 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)

static inline void __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
	local_irq_restore(flags);
}

static inline void __timeline_mark_unlock(struct intel_context *ce)
{
	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
}

#else

static inline void __timeline_mark_lock(struct intel_context *ce)
{
}

static inline void __timeline_mark_unlock(struct intel_context *ce)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 */
	__timeline_mark_lock(engine->kernel_context);

	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		return true;

	intel_timeline_enter(rq->timeline);

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
	__i915_request_commit(rq);

	/* Release our exclusive hold on the engine */
	__intel_wakeref_defer_park(&engine->wakeref);
	__i915_request_queue(rq, NULL);

	__timeline_mark_unlock(engine->kernel_context);

	return false;
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	GEM_TRACE("%s\n", engine->name);

	intel_engine_disarm_breadcrumbs(engine);
	intel_engine_pool_park(&engine->pool);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

	intel_gt_pm_put(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif