/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	void *map;

	GEM_TRACE("%s\n", engine->name);

	intel_gt_pm_get(engine->gt);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_init_hangcheck(engine);
	return 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 */
	flags = __timeline_mark_lock(engine->kernel_context);

	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	intel_timeline_enter(i915_request_timeline(rq));

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
	__i915_request_commit(rq);

	/* Release our exclusive hold on the engine */
	__intel_wakeref_defer_park(&engine->wakeref);
	__i915_request_queue(rq, NULL);

	result = false;
out_unlock:
	__timeline_mark_unlock(engine->kernel_context, flags);
	return result;
}

static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(NULL, cb);
	}
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	GEM_TRACE("%s\n", engine->name);

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_disarm_breadcrumbs(engine);
	intel_engine_pool_park(&engine->pool);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

	intel_gt_pm_put(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif