/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	void *map;

	GEM_TRACE("%s\n", engine->name);

	intel_gt_pm_get(engine->gt);

	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;

	if (engine->unpark)
		engine->unpark(engine);

	intel_engine_unpark_heartbeat(engine);
	return 0;
}

#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	unsigned long flags;

	local_irq_save(flags);
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

	return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
	mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
	local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
	return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
					  unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	unsigned long flags;
	bool result = true;

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 */
	flags = __timeline_mark_lock(engine->kernel_context);

	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	intel_timeline_enter(i915_request_timeline(rq));

	/* Check again on the next retirement. */
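	/*
	 * i.e. record engine->serial + 1 so that, once this kernel-context
	 * request has been accounted for, the next park attempt sees
	 * wakeref_serial == serial again and knows it is already idle in
	 * the kernel context (assuming no other request has been submitted
	 * in the meantime).
	 */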
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	__i915_request_commit(rq);

	/* Release our exclusive hold on the engine */
	__intel_wakeref_defer_park(&engine->wakeref);
	__i915_request_queue(rq, NULL);

	result = false;
out_unlock:
	__timeline_mark_unlock(engine->kernel_context, flags);
	return result;
}

static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(NULL, cb);
	}
}

static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	GEM_TRACE("%s\n", engine->name);

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_engine_disarm_breadcrumbs(engine);
	intel_engine_pool_park(&engine->pool);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		engine->pinned_default_state = NULL;
	}

	engine->execlists.no_priolist = false;

	intel_gt_pm_put(engine->gt);
	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif